"""Universal reasoning engine that aggregates answers from multiple perspectives."""

import asyncio
import json
import os
import logging
from typing import List

# Sentiment analysis; install vaderSentiment on the fly if it is missing.
try:
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
except ModuleNotFoundError:
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Tokenization; install NLTK and the 'punkt' tokenizer data on the fly if missing.
try:
    import nltk
    from nltk.tokenize import word_tokenize
    nltk.download('punkt', quiet=True)
except ImportError:
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
    import nltk
    from nltk.tokenize import word_tokenize
    nltk.download('punkt', quiet=True)
# Reasoning perspectives supplied by the local perspectives module.
from perspectives import (
    NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
    NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
    MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
)
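# Note: each perspective class is assumed to take the config dict in its
# constructor and to expose a generate_response(question) method, which may be
# synchronous or a coroutine; UniversalReasoning.generate_response below handles
# both cases. This interface is inferred from how the classes are used here,
# not from the perspectives module itself.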
# Load Azure OpenAI credentials from the environment (.env is read if present).
from dotenv import load_dotenv

load_dotenv()
azure_openai_api_key = os.getenv('AZURE_OPENAI_API_KEY')
azure_openai_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
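# A minimal .env sketch for local runs (placeholder values, not real credentials):
#   AZURE_OPENAI_API_KEY=<your-key>
#   AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
# The endpoint URL shape is the usual Azure OpenAI convention and is an
# assumption; only the two variable names above are read by this module.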

def setup_logging(config):
    """Configure file logging according to the loaded configuration."""
    if config.get('logging_enabled', True):
        log_level = config.get('log_level', 'DEBUG').upper()
        numeric_level = getattr(logging, log_level, logging.DEBUG)
        logging.basicConfig(
            filename='universal_reasoning.log',
            level=numeric_level,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
    else:
        logging.disable(logging.CRITICAL)

def load_json_config(file_path):
    """Load the JSON configuration file, returning an empty dict on any failure."""
    if not os.path.exists(file_path):
        logging.error(f"Configuration file '{file_path}' not found.")
        return {}
    try:
        with open(file_path, 'r') as file:
            config = json.load(file)
        logging.info(f"Configuration loaded from '{file_path}'.")
        return config
    except json.JSONDecodeError as e:
        logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
        return {}
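# Example config.json covering the keys this module reads. This is an
# illustrative sketch, not a shipped file; every key is optional and the
# defaults shown in the code apply when a key is absent:
# {
#     "logging_enabled": true,
#     "log_level": "INFO",
#     "enabled_perspectives": ["newton", "davinci", "bias_mitigation"],
#     "ethical_considerations": "Always act with transparency, fairness, and respect for privacy.",
#     "enable_response_saving": true,
#     "response_save_path": "responses.txt",
#     "backup_responses": {"enabled": true, "backup_path": "backup_responses.txt"}
# }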

def analyze_question(question):
    """Tokenize a question with NLTK for debugging and downstream analysis."""
    tokens = word_tokenize(question)
    logging.debug(f"Question tokens: {tokens}")
    return tokens

class Element:
    """A named element that maps a programming language onto defensive traits."""

    def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
        self.name = name
        self.symbol = symbol
        self.representation = representation
        self.properties = properties
        self.interactions = interactions
        self.defense_ability = defense_ability

    def execute_defense_function(self):
        message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
        logging.info(message)
        return message

class CustomRecognizer:
    """Minimal keyword-based intent recognizer for element-defense questions."""

    def recognize(self, question):
        # Flag the question as an element-defense request if it names a known element.
        if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
            return RecognizerResult(question)
        return RecognizerResult(None)

    def get_top_intent(self, recognizer_result):
        if recognizer_result.text:
            return "ElementDefense"
        else:
            return "None"


class RecognizerResult:
    """Container for the text matched by CustomRecognizer."""

    def __init__(self, text):
        self.text = text

class UniversalReasoning:
    """Aggregates answers from the enabled perspectives into a single response."""

    def __init__(self, config):
        self.config = config
        self.perspectives = self.initialize_perspectives()
        self.elements = self.initialize_elements()
        self.recognizer = CustomRecognizer()
        # Sentiment analyzer is created here for use by perspectives or later analysis.
        self.sentiment_analyzer = SentimentIntensityAnalyzer()

    def initialize_perspectives(self):
        """Instantiate the perspective classes listed in the configuration."""
        perspective_names = self.config.get('enabled_perspectives', [
            "newton",
            "davinci",
            "human_intuition",
            "neural_network",
            "quantum_computing",
            "resilient_kindness",
            "mathematical",
            "philosophical",
            "copilot",
            "bias_mitigation"
        ])
        perspective_classes = {
            "newton": NewtonPerspective,
            "davinci": DaVinciPerspective,
            "human_intuition": HumanIntuitionPerspective,
            "neural_network": NeuralNetworkPerspective,
            "quantum_computing": QuantumComputingPerspective,
            "resilient_kindness": ResilientKindnessPerspective,
            "mathematical": MathematicalPerspective,
            "philosophical": PhilosophicalPerspective,
            "copilot": CopilotPerspective,
            "bias_mitigation": BiasMitigationPerspective
        }
        perspectives = []
        for name in perspective_names:
            cls = perspective_classes.get(name.lower())
            if cls:
                perspectives.append(cls(self.config))
                logging.debug(f"Perspective '{name}' initialized.")
            else:
                logging.warning(f"Perspective '{name}' is not recognized and will be skipped.")
        return perspectives

    def initialize_elements(self):
        """Build the catalogue of elements the recognizer can match against."""
        elements = [
            Element(
                name="Hydrogen",
                symbol="H",
                representation="Lua",
                properties=["Simple", "Lightweight", "Versatile"],
                interactions=["Easily integrates with other languages and systems"],
                defense_ability="Evasion"
            ),
            Element(
                name="Diamond",
                symbol="D",
                representation="Kotlin",
                properties=["Modern", "Concise", "Safe"],
                interactions=["Used for Android development"],
                defense_ability="Adaptability"
            )
        ]
        return elements

    async def generate_response(self, question):
        """Gather responses from all perspectives and append element and ethics notes."""
        responses = []
        tasks = []

        # Schedule every perspective; wrap synchronous implementations in a coroutine
        # so they can be awaited alongside the asynchronous ones.
        for perspective in self.perspectives:
            if asyncio.iscoroutinefunction(perspective.generate_response):
                tasks.append(perspective.generate_response(question))
            else:
                async def sync_wrapper(perspective, question):
                    return perspective.generate_response(question)
                tasks.append(sync_wrapper(perspective, question))

        perspective_results = await asyncio.gather(*tasks, return_exceptions=True)

        for perspective, result in zip(self.perspectives, perspective_results):
            if isinstance(result, Exception):
                logging.error(f"Error generating response from {perspective.__class__.__name__}: {result}")
            else:
                responses.append(result)
                logging.debug(f"Response from {perspective.__class__.__name__}: {result}")

        # If the question names a known element, append its defense message.
        recognizer_result = self.recognizer.recognize(question)
        top_intent = self.recognizer.get_top_intent(recognizer_result)
        if top_intent == "ElementDefense":
            element_name = recognizer_result.text.strip()
            element = next(
                (el for el in self.elements if el.name.lower() in element_name.lower()),
                None
            )
            if element:
                defense_message = element.execute_defense_function()
                responses.append(defense_message)
            else:
                logging.info(f"No matching element found for '{element_name}'")

        ethical_considerations = self.config.get(
            'ethical_considerations',
            "Always act with transparency, fairness, and respect for privacy."
        )
        responses.append(f"**Ethical Considerations:**\n{ethical_considerations}")

        formatted_response = "\n\n".join(responses)
        return formatted_response

    def save_response(self, response):
        """Append the response to the configured save file when saving is enabled."""
        if self.config.get('enable_response_saving', False):
            save_path = self.config.get('response_save_path', 'responses.txt')
            try:
                with open(save_path, 'a', encoding='utf-8') as file:
                    file.write(response + '\n')
                logging.info(f"Response saved to '{save_path}'.")
            except Exception as e:
                logging.error(f"Error saving response to '{save_path}': {e}")

    def backup_response(self, response):
        """Append the response to the backup file when backups are enabled."""
        if self.config.get('backup_responses', {}).get('enabled', False):
            backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
            try:
                with open(backup_path, 'a', encoding='utf-8') as file:
                    file.write(response + '\n')
                logging.info(f"Response backed up to '{backup_path}'.")
            except Exception as e:
                logging.error(f"Error backing up response to '{backup_path}': {e}")

if __name__ == "__main__":
    config = load_json_config('config.json')
    # Make the Azure OpenAI credentials available to the perspectives via the config.
    config['azure_openai_api_key'] = azure_openai_api_key
    config['azure_openai_endpoint'] = azure_openai_endpoint
    setup_logging(config)

    universal_reasoning = UniversalReasoning(config)
    question = "Tell me about Hydrogen and its defense mechanisms."
    response = asyncio.run(universal_reasoning.generate_response(question))
    print(response)
    if response:
        universal_reasoning.save_response(response)
        universal_reasoning.backup_response(response)