import asyncio
import logging
import spacy
from app.services.llm_service import get_llm_service
from app.services.spacy_medical_nlp import get_spacy_nlp

# Configure Logging
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger("FINAL_VERIFY")


def _verify_nlp():
    """Stage 1: confirm the SciSpaCy model is loaded and can extract entities.

    Best-effort: any failure is caught and reported to stdout, never raised.
    """
    print("\nšŸ”¬ 1. Verifying Medical NLP (SciSpaCy)...")
    try:
        service = get_spacy_nlp()
        model = service.nlp
        # Original short-circuits on a missing model before touching meta;
        # guard the concatenation the same way so behavior is identical.
        meta_tag = (model.meta.get("lang", "") + model.meta.get("name", "")) if model else ""
        if model and "sci" in meta_tag:
            print(" āœ… SciSpaCy Model Found: en_core_sci_md")
        else:
            print(f" ā„¹ļø Model loaded: {model.meta.get('name') if model else 'None'}")
        sample = "Patient prescribed 100mg Aspirin for severe hypertension."
        found = service.extract_medical_entities(sample)
        if found:
            print(f" āœ… Extraction Success: {found}")
        else:
            print(" āš ļø No entities extracted (Check model type)")
    except Exception as exc:
        print(f" āŒ NLP Setup Failed: {exc}")


async def _verify_llm():
    """Stage 2: run a one-shot prompt through the local LLM service.

    Treats a response containing "apologize" as the service's fallback path.
    Best-effort: any failure is caught and reported to stdout, never raised.
    """
    print("\nšŸ¤– 2. Verifying Local LLM (TinyLlama)...")
    try:
        service = get_llm_service()
        question = "Explain fever in one sentence."
        print(f" šŸ“¤ Prompt: '{question}'")
        answer = await service.generate_response(question, system_prompt="You are a doctor.")
        print(f" šŸ“„ Response: {answer.strip()}")
        if answer and "apologize" not in answer.lower():
            print(" āœ… Local Inference Success!")
        else:
            print(" āš ļø Fallback Triggered (Check LLM logs)")
    except Exception as exc:
        print(f" āŒ LLM Inference Failed: {exc}")


async def verify_stack():
    """Smoke-test the full local stack: SciSpaCy NLP, then the local LLM.

    Prints a human-readable pass/fail report for each stage; failures in
    either stage are reported rather than propagated.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("šŸš€ FINAL SYSTEM STACK VERIFICATION")
    print(banner)

    _verify_nlp()
    await _verify_llm()

    print("\n" + banner)


if __name__ == "__main__":
    asyncio.run(verify_stack())