"""Academic RAG assistant: retrieves course material from Qdrant and answers with Groq."""
from dotenv import load_dotenv
import os
from qdrant_client import QdrantClient
from sentence_transformers import SentenceTransformer
from groq import Groq

# Pull configuration from the environment (.env file).
load_dotenv()

QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
COLLECTION_NAME = "student_materials"

# Vector-store connection used for retrieval.
client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)

# LLM client used for answer generation.
groq_client = Groq(api_key=GROQ_API_KEY)

# Sentence-embedding model producing the query vectors.
embedder = SentenceTransformer("intfloat/e5-large")
def format_payload(p):
    """Render one retrieved point as a labelled context snippet.

    Prefixing each chunk with its course and sheet number keeps the LLM
    from mixing material that belongs to different sheets/courses.
    """
    meta = p.payload
    header = (
        f"[COURSE: {meta.get('course', 'Unknown Course')} "
        f"| SHEET: {meta.get('sheet_number', 'Unknown Sheet')}]"
    )
    return header + "\n" + meta.get("text", "")
def search_qdrant(query):
    """Embed *query*, fetch the top matches from Qdrant, and join them.

    Returns the formatted chunks as a single string separated by "---"
    markers, or an empty string when nothing was retrieved.
    """
    # e5-family models expect a "query: " prefix so the text is embedded
    # as a search query rather than as a passage.
    vector = embedder.encode(f"query: {query}").tolist()

    response = client.query_points(
        collection_name=COLLECTION_NAME,
        query=vector,
        limit=5,  # a handful of candidates gives richer context
    )

    points = response.points
    print(f"📊 Found {len(points)} relevant chunks:")

    formatted = []
    for rank, point in enumerate(points, 1):
        print(f"  {rank}. Score: {point.score:.4f} | Course: {point.payload.get('course', 'N/A')} | Sheet: {point.payload.get('sheet_number', 'N/A')}")
        formatted.append(format_payload(point))

    return "\n\n---\n\n".join(formatted)
def rag_answer(question):
    """Answer *question* with Groq, grounded in context retrieved from Qdrant.

    Returns the model's complete answer as a single string.
    """
    print("\n🔍 Searching Qdrant...")
    context = search_qdrant(question) or "No relevant context found."

    print("🤖 Generating answer using Groq...\n")

    prompt = f"""
You are an academic AI assistant.
Use the retrieved context below to answer the question.
If the answer exists in the context, extract it directly.
If the context does NOT contain enough information, you may use your own general knowledge — but keep the answer accurate and concise.
Context:
{context}
Question:
{question}
Answer:
"""

    # Low temperature keeps the model close to the retrieved context.
    # "mixtral-8x7b-32768" is a faster alternative model if needed.
    completion = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama-3.3-70b-versatile",
        temperature=0.1,
        max_tokens=1024,
        top_p=1,
        stream=False,
    )
    return completion.choices[0].message.content
def rag_answer_stream(question):
    """Stream the answer for *question* as a generator of text fragments.

    Same retrieval and prompting as ``rag_answer`` but yields the Groq
    completion incrementally instead of returning it whole.
    """
    print("\n🔍 Searching Qdrant...")
    context = search_qdrant(question) or "No relevant context found."

    print("🤖 Generating answer using Groq (Streaming)...\n")

    prompt = f"""
You are an academic AI assistant.
Use the retrieved context below to answer the question.
If the answer exists in the context, extract it directly.
If the context does NOT contain enough information, you may use your own general knowledge — but keep the answer accurate and concise.
Context:
{context}
Question:
{question}
Answer:
"""

    response = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama-3.3-70b-versatile",
        temperature=0.1,
        max_tokens=1024,
        top_p=1,
        stream=True,
    )

    # Forward only non-empty deltas to the caller.
    for event in response:
        piece = event.choices[0].delta.content
        if piece:
            yield piece
if __name__ == "__main__":
    banner = "=" * 60
    print(banner)
    print("📚 Academic RAG System (Powered by Groq)")
    print(banner)

    user_q = input("\n💬 Enter your question: ")

    try:
        answer = rag_answer(user_q)
        print("\n" + banner)
        print("✅ AI Response:")
        print(banner)
        print(answer)
        print(banner)
    except Exception as exc:  # top-level boundary: report and exit cleanly
        print(f"\n❌ Error: {exc}")