| import asyncio
|
| import aiohttp
|
| from telegram import Update
|
| from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
|
| import logging
|
|
|
|
|
# SECURITY NOTE(review): the bot token is hardcoded and committed to source.
# Anyone who reads this file gains full control of the bot — move this to an
# environment variable (e.g. os.environ["BOT_TOKEN"]) and rotate the token.
BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"

# Native completion endpoint of a locally running llama.cpp server.
LLAMA_API_URL = "http://127.0.0.1:8080/completion"




# Root-logger setup: timestamped INFO-level logging for the whole process.
logging.basicConfig(

    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO

)
|
|
|
|
|
async def get_llama_response(prompt: str) -> str:
    """Query the local llama.cpp completion endpoint and return its reply.

    Args:
        prompt: Raw user text to send to the model.

    Returns:
        The model's generated text (stripped), or a Persian-language
        error message on HTTP error, timeout, or connection failure.
    """
    system_prompt = f"User: {prompt}\nAssistant:"
    payload = {
        "prompt": system_prompt,
        # llama.cpp's native /completion endpoint reads "n_predict" as the
        # generation limit; "max_tokens" is kept for OpenAI-compatible servers
        # so the request works against either API shape.
        "max_tokens": 64,
        "n_predict": 64,
        "temperature": 0.7,
        # Stop at end-of-sequence or when the model starts a new user turn.
        "stop": ["</s>", "User:"]
    }
    try:
        # aiohttp 3.x deprecates passing a bare number as `timeout=`;
        # ClientTimeout is the supported way to bound the whole request.
        timeout = aiohttp.ClientTimeout(total=60)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(LLAMA_API_URL, json=payload) as resp:
                if resp.status == 200:
                    data = await resp.json()
                    return data.get("content", "").strip()
                # Non-200: log with lazy %-args and surface a user-facing error.
                logging.error("LLaMA API Error: %s", resp.status)
                return "❌ خطا در دریافت پاسخ از مدل زبان."
    except asyncio.TimeoutError:
        return "⏱️ مدل دیر پاسخ داد. لطفاً دوباره تلاش کنید."
    except Exception:
        # Boundary catch: connection errors, bad JSON, etc. — log with
        # traceback and return a generic user-facing message.
        logging.exception("خطا در ارتباط با مدل:")
        return "⚠️ خطا در پردازش درخواست شما."
|
|
|
|
|
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Forward any received command text to the LLM and reply with its answer."""
    msg = update.message
    # Ignore updates that carry no text message (e.g. edited/empty payloads).
    if not (msg and msg.text):
        return
    # Drop the leading slash(es) so the model sees plain text.
    prompt = msg.text.lstrip('/')
    # Show a "typing…" indicator while the model generates.
    await msg.chat.send_action("typing")
    reply = await get_llama_response(prompt)
    await msg.reply_text(reply)
|
|
|
|
|
def main():
    """Build the Telegram application, register handlers, and start polling."""
    application = (
        ApplicationBuilder()
        .token(BOT_TOKEN)
        .build()
    )
    # Route every command-style message ("/...") to the LLM handler.
    application.add_handler(MessageHandler(filters.COMMAND, handle_command))
    # Blocks here, long-polling Telegram for updates until interrupted.
    application.run_polling()
|
|
|
# Start the bot only when executed as a script (not on import).
if __name__ == "__main__":

    main()
|
|
|