import asyncio
import json
import logging
import os
from typing import List
# Ensure vaderSentiment is importable; install it on first run if missing.
try:
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
except ModuleNotFoundError:
    # NOTE(review): installing at import time requires network access and a
    # writable environment — prefer declaring this in requirements instead.
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Ensure NLTK and its 'punkt' tokenizer models are available, installing the
# package on first run if it is missing.
try:
    import nltk
    from nltk.tokenize import word_tokenize
    nltk.download('punkt', quiet=True)
except ImportError:
    # NOTE(review): installing at import time needs network access; prefer
    # declaring nltk as a proper dependency.
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
    import nltk
    from nltk.tokenize import word_tokenize
    nltk.download('punkt', quiet=True)
from perspectives import (
    NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
    NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
    MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
)
def setup_logging(config):
    """Configure the root logger from *config*.

    Recognized keys:
        logging_enabled (bool, default True): when False, disable all
            logging up to and including CRITICAL.
        log_level (str, default 'DEBUG'): name of a standard logging level
            (case-insensitive); unknown names fall back to DEBUG.
    """
    if config.get('logging_enabled', True):
        log_level = config.get('log_level', 'DEBUG').upper()
        numeric_level = getattr(logging, log_level, logging.DEBUG)
        if not isinstance(numeric_level, int):
            # getattr can resolve non-level attributes of the logging module
            # (e.g. 'BASIC_FORMAT'); guard so basicConfig gets a real level.
            numeric_level = logging.DEBUG
        logging.basicConfig(
            filename='universal_reasoning.log',
            level=numeric_level,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
    else:
        logging.disable(logging.CRITICAL)
def load_json_config(file_path):
    """Load a JSON configuration file.

    Returns the parsed dict with 'allow_network_calls' forced to False (a
    deliberate safety override), or an empty dict when the file is missing,
    malformed, or not a JSON object. Errors are logged, never raised.
    """
    if not os.path.exists(file_path):
        logging.error(f"Configuration file '{file_path}' not found.")
        return {}
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            config = json.load(file)
    except json.JSONDecodeError as e:
        logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
        return {}
    if not isinstance(config, dict):
        # A bare list/scalar at top level cannot take key assignment below.
        logging.error(f"Configuration file '{file_path}' must contain a JSON object.")
        return {}
    logging.info(f"Configuration loaded from '{file_path}'.")
    config['allow_network_calls'] = False
    return config
def analyze_question(question):
    """Tokenize *question* with NLTK's word tokenizer and return the tokens."""
    tokens = word_tokenize(question)
    logging.debug(f"Question tokens: {tokens}")
    return tokens
class Element:
    """A chemical-element metaphor mapped onto a programming language."""

    def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
        self.name = name                        # display name, e.g. "Hydrogen"
        self.symbol = symbol                    # short symbol, e.g. "H"
        self.representation = representation    # language metaphor, e.g. "Lua"
        self.properties = properties            # list of descriptive adjectives
        self.interactions = interactions        # list of interaction descriptions
        self.defense_ability = defense_ability  # name of the defense move

    def execute_defense_function(self):
        """Log and return a human-readable defense-ability message."""
        message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
        logging.info(message)
        return message
class CustomRecognizer:
    """Minimal keyword-based intent recognizer for element-defense questions."""

    # Lower-case element names whose mention triggers the ElementDefense intent.
    KNOWN_ELEMENTS = ("hydrogen", "diamond")

    def recognize(self, question):
        """Return a RecognizerResult carrying *question* when it mentions a
        known element, otherwise a result with text=None."""
        lowered = question.lower()  # hoisted: lower-case once, not per element
        if any(name in lowered for name in self.KNOWN_ELEMENTS):
            return RecognizerResult(question)
        return RecognizerResult(None)

    def get_top_intent(self, recognizer_result):
        """Map a recognizer result to its top intent name."""
        return "ElementDefense" if recognizer_result.text else "None"


class RecognizerResult:
    """Value object holding the recognized text (None means no match)."""

    def __init__(self, text):
        self.text = text
class UniversalReasoning:
    """Answers questions by fanning them out to pluggable reasoning
    perspectives and combining the results into one response string."""

    def __init__(self, config):
        # config: plain dict, typically the result of load_json_config().
        self.config = config
        self.perspectives = self.initialize_perspectives()
        self.elements = self.initialize_elements()
        self.recognizer = CustomRecognizer()
        self.sentiment_analyzer = SentimentIntensityAnalyzer()

    def initialize_perspectives(self):
        """Instantiate each perspective named in 'enabled_perspectives'.

        Unknown names are skipped silently; every class receives the full
        config dict. Defaults to all known perspectives.
        """
        perspective_names = self.config.get('enabled_perspectives', [
            "newton", "davinci", "human_intuition", "neural_network", "quantum_computing",
            "resilient_kindness", "mathematical", "philosophical", "copilot", "bias_mitigation"
        ])
        perspective_classes = {
            "newton": NewtonPerspective,
            "davinci": DaVinciPerspective,
            "human_intuition": HumanIntuitionPerspective,
            "neural_network": NeuralNetworkPerspective,
            "quantum_computing": QuantumComputingPerspective,
            "resilient_kindness": ResilientKindnessPerspective,
            "mathematical": MathematicalPerspective,
            "philosophical": PhilosophicalPerspective,
            "copilot": CopilotPerspective,
            "bias_mitigation": BiasMitigationPerspective,
        }
        perspectives = []
        for name in perspective_names:
            cls = perspective_classes.get(name.lower())
            if cls:
                perspectives.append(cls(self.config))
                logging.debug(f"Perspective '{name}' initialized.")
        return perspectives

    def initialize_elements(self):
        """Return the built-in Element instances used for defense intents."""
        return [
            Element("Hydrogen", "H", "Lua", ["Simple", "Lightweight", "Versatile"],
                    ["Integrates with other languages"], "Evasion"),
            Element("Diamond", "D", "Kotlin", ["Modern", "Concise", "Safe"],
                    ["Used for Android development"], "Adaptability")
        ]

    async def generate_response(self, question):
        """Gather every perspective's answer concurrently and join them.

        Perspective failures are logged and skipped rather than aborting the
        whole response. When the question mentions a known element, its
        defense message is appended, followed by the configured
        ethical-considerations note.
        """
        responses = []

        async def run_perspective(perspective):
            # Normalize sync and async implementations so asyncio.gather
            # can await them uniformly (hoisted out of the loop — the
            # original redefined this wrapper on every iteration).
            if asyncio.iscoroutinefunction(perspective.generate_response):
                return await perspective.generate_response(question)
            return perspective.generate_response(question)

        tasks = [run_perspective(p) for p in self.perspectives]
        perspective_results = await asyncio.gather(*tasks, return_exceptions=True)

        for perspective, result in zip(self.perspectives, perspective_results):
            if isinstance(result, Exception):
                logging.error(f"Error from {perspective.__class__.__name__}: {result}")
            else:
                responses.append(result)

        recognizer_result = self.recognizer.recognize(question)
        top_intent = self.recognizer.get_top_intent(recognizer_result)
        if top_intent == "ElementDefense":
            element_name = recognizer_result.text.strip()
            element = next((el for el in self.elements if el.name.lower() in element_name.lower()), None)
            if element:
                responses.append(element.execute_defense_function())

        ethical = self.config.get("ethical_considerations", "Act transparently and respectfully.")
        responses.append(f"**Ethical Considerations:**\n{ethical}")

        return "\n\n".join(responses)

    def save_response(self, response):
        """Append *response* to the configured save file when saving is enabled."""
        if self.config.get('enable_response_saving', False):
            path = self.config.get('response_save_path', 'responses.txt')
            with open(path, 'a', encoding='utf-8') as file:
                file.write(response + '\n')

    def backup_response(self, response):
        """Append *response* to the backup file when backups are enabled."""
        if self.config.get('backup_responses', {}).get('enabled', False):
            backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
            with open(backup_path, 'a', encoding='utf-8') as file:
                file.write(response + '\n')
|