| """
|
| Advanced Supernova Chat System with Enhanced Reasoning
|
| Provides sophisticated AI reasoning capabilities through multi-step problem solving,
|
| knowledge synthesis, and intelligent tool coordination.
|
| """
|
|
|
| import argparse
|
| import json
|
| import os
|
| import yaml
|
| from typing import Optional
|
|
|
| import torch
|
|
|
| from supernova.config import ModelConfig
|
| from supernova.model import SupernovaModel
|
| from supernova.tokenizer import load_gpt2_tokenizer
|
| from supernova.tools import ToolOrchestrator, ToolCall
|
| from supernova.reasoning_engine import EnhancedReasoningEngine
|
|
|
# Absolute path to the company-profile text that is served verbatim for branding queries.
BRAND_PATH = os.path.join(os.path.dirname(__file__), "branding", "ALGORHYTHM_TECH_PROFILE.txt")
|
|
|
|
|
def load_brand_text() -> str:
    """Read the branding profile file and return its text without surrounding whitespace."""
    with open(BRAND_PATH, "r", encoding="utf-8") as brand_file:
        contents = brand_file.read()
    return contents.strip()
|
|
|
|
|
def load_api_keys(api_keys_path: str) -> dict:
    """Load API keys from a YAML configuration file.

    Args:
        api_keys_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration mapping, or ``{}`` when the file is
        missing, unreadable, or does not contain a YAML mapping.
        Failures are reported with a warning instead of raising so a
        bad keys file cannot abort startup (best-effort by design).
    """
    if not os.path.exists(api_keys_path):
        print(f"Warning: API keys file not found at {api_keys_path}")
        return {}

    try:
        with open(api_keys_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f) or {}
    except Exception as e:
        print(f"Warning: Could not load API keys: {e}")
        return {}

    # Robustness fix: yaml.safe_load returns scalars or lists for
    # non-mapping documents; callers rely on getting a dict back.
    if not isinstance(config, dict):
        print(f"Warning: API keys file {api_keys_path} does not contain a mapping")
        return {}
    return config
|
|
|
|
|
def should_return_brand(prompt: str) -> bool:
    """Return True when the prompt asks about the company/brand identity.

    Matching is a simple case-insensitive substring check against a
    fixed list of trigger phrases.
    """
    triggers = (
        "algorythm tech",
        "algorythm technologies",
        "company profile",
        "vision",
        "who are you",
        "about algorythm",
        "who built you",
        "who created you",
    )
    lowered = prompt.lower()
    for trigger in triggers:
        if trigger in lowered:
            return True
    return False
|
|
|
|
|
def generate(
    model: SupernovaModel,
    tok,
    prompt: str,
    max_new_tokens: int = 200,
    temperature: float = 0.8,
    top_k: Optional[int] = 50,
) -> str:
    """Autoregressively sample up to ``max_new_tokens`` tokens after *prompt*.

    Args:
        model: The Supernova model; run in eval mode on its own device.
        tok: Tokenizer providing ``encode(..., return_tensors="pt")`` and ``decode``.
        prompt: Text to condition on.
        max_new_tokens: Number of sampling steps to run.
        temperature: Softmax temperature; clamped to at least 1e-6 to
            avoid division by zero.
        top_k: If set and positive, restrict sampling to the k most
            likely tokens at each step.

    Returns:
        The decoded sequence, i.e. the prompt followed by the sampled
        continuation. There is no early stop on an end-of-sequence token.
    """
    model.eval()
    device = next(model.parameters()).device
    ids = tok.encode(prompt, return_tensors="pt").to(device)

    context_limit = model.cfg.n_positions
    safe_temperature = max(1e-6, temperature)  # loop-invariant, hoisted

    with torch.no_grad():
        for _ in range(max_new_tokens):
            # Condition only on the most recent ``context_limit`` tokens.
            window = ids if ids.size(1) < context_limit else ids[:, -context_limit:]

            logits, _ = model(window)
            step_logits = logits[:, -1, :] / safe_temperature

            if top_k is not None and top_k > 0:
                # Mask out everything below the k-th largest logit.
                kth = torch.topk(step_logits, min(top_k, step_logits.size(-1))).values[:, -1, None]
                step_logits = step_logits.masked_fill(step_logits < kth, float("-inf"))

            probabilities = torch.softmax(step_logits, dim=-1)
            next_token = torch.multinomial(probabilities, num_samples=1)
            ids = torch.cat([ids, next_token], dim=1)

    return tok.decode(ids[0].tolist())
|
|
|
|
|
class AdvancedSupernovaChat:
    """Advanced chat system with sophisticated reasoning capabilities.

    Wires together the Supernova language model, a tool orchestrator
    (math engine + web search) and an enhanced reasoning engine, and
    exposes a single-shot ``respond`` API plus an interactive
    ``chat_loop`` REPL.
    """

    # Maximum number of user/assistant exchanges retained in memory.
    HISTORY_LIMIT = 5

    def __init__(self, config_path: str, api_keys_path: str, checkpoint_path: Optional[str] = None):
        """Build the chat system.

        Args:
            config_path: Path to the model config JSON file.
            api_keys_path: Path to the YAML file holding API keys.
            checkpoint_path: Optional path to a trained checkpoint;
                when absent, the model keeps its random initialization.
        """
        self.cfg = ModelConfig.from_json_file(config_path)
        self.tok = load_gpt2_tokenizer()
        self.model = SupernovaModel(self.cfg)

        if checkpoint_path and os.path.exists(checkpoint_path):
            # NOTE(review): torch.load unpickles arbitrary objects -- only
            # load checkpoints from trusted sources.
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
            self.model.load_state_dict(checkpoint['model_state_dict'])
            print(f"[OK] Loaded checkpoint from {checkpoint_path}")
        else:
            print("[WARN] No checkpoint loaded - using randomly initialized model")

        api_config = load_api_keys(api_keys_path)

        # Security fix: the previous revision shipped a hard-coded Serper
        # API key in source as the fallback. Credentials now come only
        # from the config file or the SERPER_API_KEY environment variable.
        serper_key = api_config.get('serper_api_key') or os.environ.get('SERPER_API_KEY', '')
        self.tools = ToolOrchestrator(serper_api_key=serper_key)

        self.reasoning_engine = EnhancedReasoningEngine(self.tools)
        self.conversation_history = []

        print("[INFO] Advanced reasoning engine initialized")
        print("[INFO] Available tools: Math Engine, Web Search")

    def analyze_query_intent(self, user_input: str) -> dict:
        """Analyze the user's intent and determine the best response strategy.

        Args:
            user_input: Raw user message.

        Returns:
            A dict with keys ``complexity`` ('simple' or 'complex'),
            ``requires_reasoning`` (bool), ``domains`` (matched domain
            names), ``tool_needed`` (always None here; tool routing
            happens in ``respond``) and ``response_strategy``
            ('direct' or 'reasoning').
        """
        import re  # local import: word-boundary matching is only needed here

        intent_analysis = {
            'complexity': 'simple',
            'requires_reasoning': False,
            'domains': [],
            'tool_needed': None,
            'response_strategy': 'direct'
        }

        lowered = user_input.lower()  # hoisted: was recomputed per keyword

        complex_indicators = [
            'explain why', 'analyze', 'compare and contrast', 'evaluate',
            'what are the implications', 'how does this relate to',
            'consider multiple factors', 'pros and cons'
        ]
        if any(indicator in lowered for indicator in complex_indicators):
            intent_analysis['requires_reasoning'] = True
            intent_analysis['complexity'] = 'complex'
            intent_analysis['response_strategy'] = 'reasoning'

        # Bug fix: the keyword 'AI' was substring-checked against the
        # *lowercased* input and could never match. Keywords are now all
        # lowercase and anchored at a leading word boundary, which also
        # avoids false positives such as 'ai' inside 'said' while still
        # matching plurals like 'algorithms'.
        domain_keywords = {
            'science': ['physics', 'chemistry', 'biology', 'scientific'],
            'technology': ['programming', 'software', 'computer', 'ai', 'algorithm'],
            'medicine': ['health', 'medical', 'disease', 'treatment', 'symptoms'],
            'business': ['market', 'economy', 'finance', 'management', 'strategy']
        }
        for domain, keywords in domain_keywords.items():
            if any(re.search(rf"\b{re.escape(keyword)}", lowered) for keyword in keywords):
                intent_analysis['domains'].append(domain)

        # Queries spanning several domains deserve the reasoning path.
        if len(intent_analysis['domains']) > 1:
            intent_analysis['requires_reasoning'] = True
            intent_analysis['response_strategy'] = 'reasoning'

        return intent_analysis

    def respond(self, user_input: str) -> str:
        """Generate a response for *user_input*.

        Strategy, in order: branding short-circuit, reasoning engine for
        complex queries, direct tool routing (math/search), and finally
        plain model generation as the fallback.
        """
        # Branding questions are answered verbatim from the profile file.
        if should_return_brand(user_input):
            return load_brand_text()

        intent = self.analyze_query_intent(user_input)

        # 1) Complex queries go through the reasoning engine; on failure
        #    we fall through to the tool/generation paths below.
        if intent['requires_reasoning'] or intent['response_strategy'] == 'reasoning':
            try:
                return self.reasoning_engine.process_complex_query(
                    user_input, self.model, self.tok
                )
            except Exception as e:
                print(f"Reasoning engine error: {e}")

        # 2) Direct tool routing (math engine / web search).
        tool_call = self.tools.route_query(user_input)
        if tool_call:
            tool_call = self.tools.execute_tool_call(tool_call)

            if tool_call.result:
                if tool_call.tool == "math_engine":
                    # Fix: the original literal here was corrupted by a
                    # mis-encoded emoji containing a raw newline.
                    response = (
                        "I'll solve this mathematical problem for you:\n\n"
                        f"{tool_call.result}\n\n"
                        "**Mathematical Analysis Complete**\n"
                        "The solution above shows the step-by-step computation with precise results."
                    )
                elif tool_call.tool == "serper":
                    response = (
                        "Based on the latest information I found:\n\n"
                        f"{tool_call.result}\n"
                        "**Information Synthesis**\n"
                        "This data reflects current, real-time information from authoritative sources."
                    )
                else:
                    response = tool_call.result
                return response

            elif tool_call.error:
                # 3) Tool failed: fall back to model generation with a
                #    prompt that acknowledges the failure.
                fallback_prompt = f"""You are Supernova, an advanced AI assistant with comprehensive knowledge across all domains. The user asked: "{user_input}"

I couldn't access external tools ({tool_call.error}), but I can provide substantial help based on my extensive training across science, technology, mathematics, literature, history, medicine, and more.

Provide a detailed, thoughtful response that demonstrates deep understanding:"""

                try:
                    response = generate(self.model, self.tok, fallback_prompt, max_new_tokens=500, temperature=0.7)

                    # Strip any echo of the prompt's trailing instruction.
                    if "Provide a detailed" in response:
                        response = response.split("Provide a detailed", 1)[1]
                    if "response that demonstrates" in response:
                        response = response.split("response that demonstrates", 1)[1]

                    return f"**Advanced Analysis**\n\n{response.strip()}"

                except Exception:
                    return f"I apologize, but I'm experiencing technical difficulties. However, I can tell you that {user_input.lower()} is an excellent question that touches on important concepts. Could you please rephrase or break it down into more specific parts?"

        # 4) Default path: plain model generation with a persona prompt.
        try:
            enhanced_prompt = f"""You are Supernova, an advanced AI assistant built by AlgoRythm Technologies with sophisticated reasoning capabilities. You possess deep expertise across multiple domains including:

- Science & Mathematics: Physics, chemistry, biology, calculus, statistics
- Technology & Engineering: Programming, AI, systems design, algorithms
- Medicine & Health: Anatomy, pharmacology, diagnostics, treatments
- Business & Economics: Finance, strategy, market analysis, management
- Humanities: History, literature, philosophy, psychology, sociology
- Arts & Culture: Music, visual arts, design, architecture

Provide comprehensive, nuanced responses that demonstrate sophisticated understanding and reasoning.

User: {user_input}

Supernova (Advanced Analysis): """

            response = generate(self.model, self.tok, enhanced_prompt, max_new_tokens=600, temperature=0.7)

            # Keep only the text after the assistant marker, if echoed.
            if "Supernova (Advanced Analysis): " in response:
                response = response.split("Supernova (Advanced Analysis): ", 1)[1]
            elif "Supernova:" in response:
                response = response.split("Supernova:", 1)[1]

            return f"**Comprehensive Analysis**\n\n{response.strip()}"

        except Exception as e:
            return f"I encountered an error while generating a response: {str(e)}. Let me try to help in a different way - could you rephrase your question or break it into smaller parts?"

    def chat_loop(self):
        """Run the interactive REPL until the user quits or interrupts."""
        # The original banner strings contained mis-encoded emoji
        # (mojibake); they are replaced with readable ASCII output.
        print("*** SUPERNOVA ADVANCED AI ASSISTANT ***")
        print("=" * 50)
        print("Built by AlgoRythm Technologies")
        print("Enhanced with Advanced Reasoning Engine")
        print("Integrated Tools: Math Engine + Web Search")
        print("Multi-Domain Expertise & Sophisticated Analysis")
        print("=" * 50)
        print("Type 'quit', 'exit', or 'bye' to end the conversation.\n")

        while True:
            try:
                user_input = input("\nYou: ").strip()

                if user_input.lower() in ['quit', 'exit', 'bye', 'q']:
                    print("\nSupernova: Thank you for this intellectually stimulating conversation! "
                          "I enjoyed applying advanced reasoning to help with your questions. Until next time!")
                    break

                if not user_input:
                    continue

                print("\nSupernova: ", end="")
                response = self.respond(user_input)
                print(response)

                # Keep only the most recent exchanges to bound memory use.
                self.conversation_history.append({
                    'user': user_input,
                    'assistant': response
                })
                if len(self.conversation_history) > self.HISTORY_LIMIT:
                    self.conversation_history.pop(0)

            except KeyboardInterrupt:
                print("\n\nSupernova: Goodbye! Thanks for the engaging discussion!")
                break
            except Exception as e:
                # Bug fix: the previous revision printed a literal
                # backslash-n ("\\nError: ...") instead of a newline.
                print(f"\nError: {e}")
|
|
|
|
|
def main():
    """CLI entry point: parse arguments, build the chat system, and run it."""
    parser = argparse.ArgumentParser(description="Advanced Supernova Chat with Enhanced Reasoning")
    parser.add_argument("--config", required=True, help="Path to model config file")
    parser.add_argument("--api-keys", default="./configs/api_keys.yaml", help="Path to API keys file")
    parser.add_argument("--checkpoint", help="Path to model checkpoint (optional)")
    parser.add_argument("--prompt", help="Single prompt mode (instead of chat loop)")
    args = parser.parse_args()

    chat = AdvancedSupernovaChat(
        config_path=args.config,
        api_keys_path=args.api_keys,
        checkpoint_path=args.checkpoint,
    )

    # One-shot mode when --prompt is given, otherwise the interactive REPL.
    if args.prompt:
        print(chat.respond(args.prompt))
    else:
        chat.chat_loop()
|
|
|
|
|
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()