# Telegram bot front-end for a smolagents CodeAgent (Hugging Face Space).
| import logging | |
| from telegram import Update | |
| from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes | |
# Enable logging: timestamped, per-module, INFO level and above.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
# Module-level logger used by all handlers below.
logger = logging.getLogger(__name__)

# Global agent instance (will be set by start_telegram_bot).
# NOTE: handlers dereference this directly; calling them before
# start_telegram_bot() has run will fail on the None value.
agent = None
| # Command Handlers | |
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Greet the user in response to the /start command."""
    first_name = update.effective_user.first_name
    greeting = (
        f'Hi {first_name}! I am your AI coding assistant. '
        f'Ask me anything and I will solve it step by step!'
    )
    await update.message.reply_text(greeting)
async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Reply to the /help command with a summary of what the bot can do."""
    capabilities = """
I can help you with:
- Coding questions
- Web searches
- Mathematical calculations
- Data analysis
- And much more!
Just send me your question and I'll work on it!
"""
    await update.message.reply_text(capabilities)
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Run the shared agent on a user's text message and reply with the result.

    Sends a placeholder "Thinking..." message immediately, then edits it in
    place with either the agent's answer or an error notice.

    Args:
        update: Incoming Telegram update carrying the user's text message.
        context: Handler context (unused, required by the handler signature).
    """
    import asyncio

    user_message = update.message.text
    user_id = update.effective_user.id
    # Lazy %-formatting so the string is only built when INFO is enabled.
    logger.info("User %s asked: %s", user_id, user_message)

    # Send "thinking" placeholder that we will edit with the real answer.
    thinking_msg = await update.message.reply_text("🤔 Thinking...")
    try:
        # agent.run() is synchronous; run it in a worker thread so the
        # event loop (and therefore other chats) is not blocked while
        # the model thinks. reset=False keeps conversation memory.
        result = await asyncio.to_thread(agent.run, user_message, reset=False)
        final_answer = str(result)
        # Telegram rejects messages longer than 4096 chars — truncate.
        await thinking_msg.edit_text(f"✅ Answer:\n\n{final_answer}"[:4096])
    except Exception as e:
        # Top-level handler boundary: log with traceback, tell the user.
        logger.exception("Error processing message")
        await thinking_msg.edit_text(
            f"❌ Sorry, I encountered an error: {str(e)}\n\n"
            f"Please try rephrasing your question."
        )
async def handle_streaming_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle a message with per-step streaming status updates (advanced).

    Iterates the agent's streamed step logs, editing a single status message
    after each step, then replaces it with the final answer.

    Args:
        update: Incoming Telegram update carrying the user's text message.
        context: Handler context (unused, required by the handler signature).
    """
    user_message = update.message.text

    # Single status message that gets edited as the agent progresses.
    status_msg = await update.message.reply_text("🤔 Starting...")
    try:
        step_count = 0
        # Track the last yielded item: smolagents' streaming run yields the
        # final answer as the last element of the stream.
        final_answer = None
        # NOTE(review): this generator iterates synchronously and blocks the
        # event loop between awaits — confirm acceptable for this bot's load,
        # or move the iteration into a worker thread.
        for step_log in agent.run(user_message, stream=True, reset=False):
            step_count += 1
            # Only model-output steps carry text worth showing to the user;
            # cap at 500 chars so the edit stays well under Telegram limits.
            if hasattr(step_log, 'model_output'):
                await status_msg.edit_text(
                    f"📝 Step {step_count}:\n{step_log.model_output[:500]}..."
                )
            final_answer = step_log
        # final_answer is None if the stream yielded nothing (previously a
        # NameError). Truncate to Telegram's 4096-char message cap.
        await status_msg.edit_text(f"✅ Final Answer:\n\n{str(final_answer)}"[:4096])
    except Exception as e:
        await status_msg.edit_text(f"❌ Error: {str(e)}")
def start_telegram_bot(shared_agent, token="8634464564:AAGL7FzFkMN-Uktf97NKtDs1RFPGxec-HFI"):
    """Start the Telegram bot with a shared agent instance in a thread-safe way.

    Blocks the calling thread polling for updates until the application stops.

    Args:
        shared_agent: The CodeAgent instance to use for processing messages.
        token: Telegram bot token. May be overridden by the
            TELEGRAM_BOT_TOKEN environment variable.
    """
    import asyncio
    import os

    # SECURITY: a real-looking bot token is hard-coded as the default value
    # above and is therefore committed to source control — it should be
    # revoked and supplied via configuration instead. The environment
    # variable below takes precedence when set.
    token = os.environ.get("TELEGRAM_BOT_TOKEN", token)

    global agent
    agent = shared_agent  # handlers read this module-level reference

    # Create a fresh event loop: this function may run in a non-main thread,
    # which has no loop by default.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        # Build the application and wire up command/message handlers.
        application = Application.builder().token(token).build()
        application.add_handler(CommandHandler("start", start))
        application.add_handler(CommandHandler("help", help_command))
        application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))

        logger.info("Telegram Bot started!")
        # stop_signals=None: signal handlers can only be installed in the
        # main thread, so disable them for thread safety.
        application.run_polling(
            allowed_updates=Update.ALL_TYPES,
            stop_signals=None
        )
    finally:
        loop.close()
def main():
    """Standalone entry point (for backward compatibility)."""
    from smolagents import CodeAgent, LiteLLMModel
    import yaml
    from tools.final_answer import FinalAnswerTool

    # Local Ollama model backing the agent.
    llm = LiteLLMModel(
        model_id="ollama/qwen2.5-coder:7b",
        api_base="http://localhost:11434",
        max_tokens=2096,
        temperature=0.5
    )

    # Prompt templates come from the adjacent YAML file.
    with open("prompts.yaml", 'r') as fh:
        templates = yaml.safe_load(fh)

    # The only tool the standalone agent needs is final_answer.
    answer_tool = FinalAnswerTool()

    bot_agent = CodeAgent(
        model=llm,
        tools=[answer_tool],
        max_steps=6,
        verbosity_level=1,
        prompt_templates=templates
    )

    start_telegram_bot(bot_agent)
# Run standalone (builds its own agent) when executed as a script.
if __name__ == '__main__':
    main()