Spaces:
Sleeping
Sleeping
import os
from openai import OpenAI
from dotenv import load_dotenv
# Pull NVIDIA_API_KEY (and any other settings) from a local .env file into
# the process environment before generate_explanation() reads it.
load_dotenv()
# System prompt sent verbatim with every LLM request. It pins the output
# contract: 3-4 points, each prefixed with the @@POINT@@ delimiter that
# generate_explanation() checks for before accepting a model's response.
# NOTE: this string is runtime behavior — do not edit casually.
SYSTEM_PROMPT = """
You are a senior banking AI advisor for a high-end lending firm.
Your goal is to provide a very professional, encouraging, and data-driven explanation for a loan decision.
Rules:
1. Divide your explanation into exactly 3 or 4 clear, distinct points.
2. Prefix EVERY point with the delimiter: @@POINT@@
3. Keep each point concise (1-2 sentences).
4. Do NOT use markdown headers like ###. Use **Bold** for emphasis.
5. Do NOT invent new financial data. Only use what is provided in the packet.
6. If the loan is rejected, ensure the points explain the 'Why' and refer to the specific 'Actionable Step' provided in the deterministic packet.
Format:
@@POINT@@ [First insight]
@@POINT@@ [Second insight]
...etc
"""
# Prioritized models for fallback logic: generate_explanation() tries each
# entry in order and moves to the next on an API error or when the response
# lacks the required @@POINT@@ structure.
LLM_MODELS = [
    "mistralai/mistral-large-3-675b-instruct-2512",
    "meta/llama-3.1-405b-instruct",
    "mistralai/mixtral-8x7b-instruct-v0.1"
]
def generate_explanation(packet: dict) -> str:
    """Produce a customer-facing markdown explanation for a loan decision.

    Tries the NVIDIA-hosted models in ``LLM_MODELS`` order; falls back to a
    deterministic template when no API key is configured, when every model
    errors out, or when no model emits the required ``@@POINT@@`` structure.

    Args:
        packet: Decision data. Expected keys (all optional, with defaults):
            ``prediction`` ('Y' means approved), ``confidence`` (assumed to
            be a 0-1 probability — the deterministic branch scales it by 100
            for display), ``dti_ratio`` (already a percentage), and
            ``optimized_suggestion`` (actionable advice string).

    Returns:
        Markdown-formatted explanation text, never raises to the caller.
    """
    api_key = os.getenv("NVIDIA_API_KEY")

    # Values shared by both the deterministic template and the LLM prompt,
    # computed once so the two paths can never disagree.
    result_label = "Approved" if packet.get('prediction') == 'Y' else "Rejected"
    confidence_val = packet.get('confidence', 0) * 100  # 0-1 -> percent
    dti_val = packet.get('dti_ratio', 0)
    suggestion = packet.get('optimized_suggestion', '')

    default_narrative = f"""
### **AI Advisor Analysis (Deterministic Mode)**
*Note: This is an automated diagnostic analysis.*
Based on our core financial engine, your application has been **{result_label}** with a confidence score of **{confidence_val:.2f}%**.
**Key Insights:**
- Your current **Debt-to-Income (DTI)** ratio is **{dti_val:.2f}%**.
- **Actionable Step:** {suggestion}
"""

    # Deterministic Mode: without a usable key we cannot call the API at all.
    if not api_key or "YOUR_KEY" in api_key:
        return default_narrative

    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=api_key
    )

    # BUG FIX: the original formatted the raw 0-1 confidence with a '%'
    # suffix (e.g. "0.87%") while the deterministic narrative showed the
    # scaled value ("87.00%"). Since the system prompt forbids the model
    # from inventing data, it would repeat the wrong number. Reuse the
    # scaled confidence_val and the readable result_label here.
    user_content = f"""
Result: {result_label}
Confidence: {confidence_val:.2f}%
DTI Ratio: {dti_val:.2f}%
Primary Suggestion: {packet.get('optimized_suggestion', 'None')}
"""
    # Multi-model fallback loop: first structurally valid answer wins.
    for model_name in LLM_MODELS:
        try:
            print(f"Attempting inference with model: {model_name}...")
            completion = client.chat.completions.create(
                model=model_name,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": user_content}
                ],
                temperature=0.15,  # low temperature: factual, consistent tone
                max_tokens=1024
            )
            content = completion.choices[0].message.content
            # Basic validation that the model followed the @@POINT@@ contract.
            if "@@POINT@@" in content:
                print(f"Success with model: {model_name}")
                return content
            print(f"Model {model_name} failed to provide structured output. Trying next...")
        except Exception as e:
            # Broad catch is deliberate: any API failure (auth, timeout,
            # model retired) should advance to the next model, never crash.
            print(f"Error calling {model_name}: {e}. Retrying with fallback...")

    # Final fallback if every model failed or misbehaved.
    print("CRITICAL: All AI models failed. Returning deterministic narrative.")
    return default_narrative