Reduce startup brittleness in the Rhythma experience
Split the monolithic backend into focused engine and analysis modules, keep the public rhythma import surface stable, trim obvious dependency and logging noise, and clean up the Gradio examples so the advanced-setting columns render intentionally. This keeps the app easier to change while avoiding a behavioral rewrite.
Constraint: The Hugging Face Space currently relies on app.py importing from rhythma
Constraint: Optional dependencies must not block module import when unavailable locally
Rejected: Full product-behavior redesign in the same change | too broad for a safe push
Rejected: Eager embedding initialization at startup | slows startup and increases failure surface
Confidence: medium
Scope-risk: moderate
Reversibility: clean
Directive: Keep optional provider initialization lazy and preserve the public rhythma facade unless app.py is updated in the same change
Tested: python -m py_compile app.py rhythma.py rhythma_engine.py rhythma_analysis.py tests/test_rhythma_regression.py
Tested: python -c "from rhythma import RhythmaModulationEngine, RhythmaSymphAICore; core = RhythmaSymphAICore(use_groq=False, use_embeddings=False); result = core.analyze_input('feeling stressed about work'); wave = RhythmaModulationEngine(emotional_state='stressed').generate_modulated_wave(0.1); print(result['emotional_state'], result['rhythm_pattern'], len(wave))"
Not-tested: Full Gradio UI startup in this environment because gradio is not installed locally
Not-tested: End-to-end Hugging Face Space runtime after deployment
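
The second Tested command doubles as a template for the new regression tests; a hypothetical sketch in that shape is below (the real tests/test_rhythma_regression.py is not reproduced here, and attribute names such as `emotional_states`, `rhythm_patterns`, and `sample_rate` follow the pre-split implementation, so treat them as assumptions).

```python
# Hypothetical sketch mirroring the smoke-test command above; not the actual
# contents of tests/test_rhythma_regression.py.
from rhythma import RhythmaModulationEngine, RhythmaSymphAICore


def test_analyze_input_returns_state_and_pattern():
    # Groq and embeddings stay disabled so the test never needs network access.
    core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)
    result = core.analyze_input("feeling stressed about work")
    assert result["emotional_state"] in core.emotional_states
    assert result["rhythm_pattern"] in core.rhythm_patterns


def test_generate_modulated_wave_has_expected_length():
    engine = RhythmaModulationEngine(emotional_state="stressed")
    wave = engine.generate_modulated_wave(0.1)
    # 0.1 s of audio at the engine's 44100 Hz sample rate.
    assert len(wave) == int(0.1 * engine.sample_rate)
```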
- README.md +20 -1
- __pycache__/app.cpython-312.pyc +0 -0
- app.py +57 -105
- requirements.txt +1 -5
- rhythma.py +8 -663
- rhythma_analysis.py +259 -0
- rhythma_engine.py +377 -0
- tests/test_rhythma_regression.py +34 -0
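
The optional-dependency constraint and the lazy-initialization directive above reduce to a guard-at-import, construct-on-demand pattern; a minimal sketch follows, with `get_groq_client` as an illustrative name rather than an actual helper in the refactored modules.

```python
# Minimal sketch of the optional-dependency pattern described above; the helper
# name is illustrative and not part of rhythma_analysis.py.
import os

try:
    from groq import Groq
    GROQ_AVAILABLE = True
except ImportError:  # package may be absent locally; importing must still succeed
    GROQ_AVAILABLE = False


def get_groq_client():
    """Build a client only when it is actually needed, never at import time."""
    if not GROQ_AVAILABLE:
        return None
    api_key = os.environ.get("GROQ_API_KEY")
    return Groq(api_key=api_key) if api_key else None
```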
README.md
@@ -13,4 +13,23 @@ thumbnail: >-
 short_description: 🔊Reverse Active Denial System
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+## Local Development
+
+Install the dependencies, then run the Gradio app:
+
+```bash
+pip install -r requirements.txt
+python app.py
+```
+
+Set `GROQ_API_KEY` to enable Groq-backed text classification and audio transcription.
+
+## Project Structure
+
+- `app.py`: Gradio interface and request pipeline
+- `rhythma_engine.py`: waveform generation, audio export, and visualization
+- `rhythma_analysis.py`: text/audio analysis, optional Groq integration, and result shaping
+- `rhythma.py`: compatibility facade for the public classes
+- `tests/test_rhythma_regression.py`: regression tests for the core behavior

__pycache__/app.cpython-312.pyc: binary file (11.2 kB), no text diff.
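
Beyond `python app.py`, the library can be driven directly through the `rhythma` facade; the sketch below adapts the smoke-test command recorded under Tested above (keyword arguments follow that command, and the output filename is illustrative).

```python
# Programmatic use of the public facade, adapted from the recorded smoke test.
from rhythma import RhythmaModulationEngine, RhythmaSymphAICore

core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)  # offline analysis only
result = core.analyze_input("feeling stressed about work")
print(result["emotional_state"], result["rhythm_pattern"])

engine = RhythmaModulationEngine(emotional_state=result["emotional_state"])
wave = engine.generate_modulated_wave(5)          # NumPy array of samples
engine.save_audio(5, "rhythma_demo.wav")          # illustrative output path
```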
app.py
@@ -1,129 +1,106 @@
 import os
+import logging
 import gradio as gr
-import numpy as np
 import matplotlib
 matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-from PIL import Image
-import soundfile as sf
 import tempfile
 import time
 from rhythma import RhythmaModulationEngine, RhythmaSymphAICore
+
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
 
 # --- Environment Variable Check ---
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
 use_groq = bool(GROQ_API_KEY)  # True only if key exists and is not empty
 
 if not use_groq:
-    print(" Falling back to local analysis methods.")
-    print("*"*40)
+    LOGGER.warning(
+        "GROQ_API_KEY not found. Advanced LLM analysis and transcription are disabled."
+    )
 else:
+    LOGGER.info("GROQ_API_KEY found. Enabling Groq features.")
 # --- End Environment Variable Check ---
 
 
 # --- Initialize Core Components ---
 try:
-    # Pass the determined use_groq flag to the core
     symphai_core = RhythmaSymphAICore(use_groq=use_groq)
 except Exception as e:
-    symphai_core = None  # Indicate failure
+    LOGGER.exception("Could not initialize RhythmaSymphAICore: %s", e)
+    symphai_core = None
 # --- End Initialization ---
 
 
 # --- Core Functions ---
 def analyze_input(input_text=None, audio_input=None):
-    """Analyze user input using the SymphAI Core."""
     if symphai_core is None:
         return {"error": "Analysis Core failed to initialize."}
 
-    # Ensure audio_input is a filepath string or None
     audio_filepath = audio_input if isinstance(audio_input, str) else None
-
-    # Pass to SymphAI Core for analysis
-    # Add default empty string for input_text if None, as core expects string or None
     return symphai_core.analyze_input(input_text or "", audio_filepath)
 
 
 def generate_modulated_experience(analysis_result, base_freq=None, modulation_type="sine", rhythm_pattern=None, duration=5):
-    """Generate a complete modulated experience based on analysis and parameters."""
-    print(f"DEBUG: generate_modulated_experience received analysis: {analysis_result}")
-    print(f"DEBUG: Overrides - Freq: {base_freq}, Mod: {modulation_type}, Rhythm: {rhythm_pattern}, Dur: {duration}")
-
-    # --- Input Validation ---
     if not isinstance(analysis_result, dict):
+        error_msg = "Internal error: analysis result is not in the expected format."
+        LOGGER.error(error_msg)
         return error_msg, None, None, None, None
 
+    if analysis_result.get("error"):
         error_msg = f"Analysis Error: {analysis_result['error']}"
-        # Return the error message clearly for the analysis output
+        LOGGER.error(error_msg)
         return error_msg, None, None, None, None
 
-    # --- Determine Final Parameters ---
-    # Use manual override if provided and valid, otherwise use analysis result
-    final_rhythm_pattern = rhythm_pattern if rhythm_pattern else rhythm_pattern_from_analysis
-    # Use manual frequency override ONLY if it's > 0
-    final_base_freq = base_freq if base_freq and base_freq > 0 else None  # Pass None to let engine use emotion/default
-
-    print(f"DEBUG: Engine Params - Emotion: {emotional_state}, Freq Override: {final_base_freq}, Rhythm: {final_rhythm_pattern}, Mod: {modulation_type}")
+    emotional_state = analysis_result.get("emotional_state", "neutral")
+    final_rhythm_pattern = rhythm_pattern or analysis_result.get("rhythm_pattern", "calm")
+    final_base_freq = base_freq if base_freq and base_freq > 0 else None
 
     try:
-        # --- Initialize the Rhythma Engine ---
         engine = RhythmaModulationEngine(
             base_freq=final_base_freq,
             modulation_type=modulation_type,
             rhythm_pattern=final_rhythm_pattern,
-            emotional_state=emotional_state if not final_base_freq else None
+            emotional_state=emotional_state if not final_base_freq else None,
         )
 
-        # --- Generate Outputs ---
         timestamp = int(time.time())
        temp_dir = tempfile.gettempdir()
-        # Ensure temp_dir exists (useful in some restricted environments)
         os.makedirs(temp_dir, exist_ok=True)
         audio_file = os.path.join(temp_dir, f"rhythma_{timestamp}.wav")
 
-        # Generate and save audio
         saved_audio_path = engine.save_audio(duration, audio_file)
         if not saved_audio_path:
+            raise RuntimeError("Failed to save generated audio file.")
 
-        # Generate waveform visualization (Plot)
         fig = engine.visualize_waveform(duration)
-
-        # Get simple waveform image (PIL Image)
         waveform_image = engine.get_waveform_image()
-
-        # Get complete analysis text from the engine's perspective
         analysis_text = engine.get_complete_analysis()
-
-        # Get symbolic interpretation
         symbolic = engine.get_symbolic_interpretation()
 
-        print("✅ Modulation experience generated successfully.")
         return analysis_text, saved_audio_path, fig, waveform_image, symbolic
 
     except Exception as e:
         error_msg = f"Error during Rhythma generation: {e}"
-
-        import traceback
-        traceback.print_exc()
-        # Return error message for analysis, and None for other outputs
+        LOGGER.exception(error_msg)
         return error_msg, None, None, None, None
 
 
+def coerce_frequency(value):
+    try:
+        numeric_value = float(value)
+    except (TypeError, ValueError):
+        return 0.0
+    return numeric_value if numeric_value > 0 else 0.0
+
+
+def normalize_rhythm_override(value):
+    if value in (None, "", "Automatic"):
+        return None
+    return value
+
+
 def rhythma_experience(
     input_text, audio_input,
     override_freq=None,
@@ -131,41 +108,20 @@ def rhythma_experience(
     override_rhythm=None,
     duration=5
 ):
-    """Complete Rhythma experience pipeline: Analysis -> Generation"""
-    print("\n--- Starting New Rhythma Experience ---")
-    # Clean up input text
     input_text = input_text.strip() if input_text else ""
-
-    # --- Step 1: Analyze input ---
-    # Ensure override_freq is float or None
-    try:
-        freq_override_value = float(override_freq) if override_freq is not None else 0.0
-    except (ValueError, TypeError):
-        freq_override_value = 0.0  # Default to 0 if invalid input
-
+    freq_override_value = coerce_frequency(override_freq)
     analysis = analyze_input(input_text, audio_input)
 
-    # --- Step 2: Generate modulated experience ---
-    # Pass analysis results and overrides to the generation function
     analysis_text, audio_file, fig, waveform_image, symbolic = generate_modulated_experience(
         analysis,
         base_freq=freq_override_value,
         modulation_type=override_modulation,
-        rhythm_pattern=override_rhythm,
+        rhythm_pattern=normalize_rhythm_override(override_rhythm),
         duration=duration
     )
 
-    # --- Step 3: Prepare Outputs ---
-    # Get transcription from analysis result (will be empty string if no audio/transcription)
     transcription = analysis.get("transcription", "") if isinstance(analysis, dict) else ""
-
-    # If only transcription failed, it might be in the transcription field
-
-    # Handle potential None figure if generation failed
-    plot_output = fig if fig else None  # Gradio handles None for Plot output
-
-    print("--- Rhythma Experience Complete ---")
-    # Return all outputs for Gradio interface
+    plot_output = fig if fig else None
     return analysis_text, audio_file, plot_output, waveform_image, symbolic, transcription
 
 # --- Create the Gradio Interface ---
@@ -206,13 +162,12 @@ def create_interface():
            value="sine",
            label="Override Modulation Type"
        )
-        # Get available patterns from the engine instance
        available_patterns = list(RhythmaModulationEngine().rhythm_configs.keys())
        override_rhythm = gr.Dropdown(
+            choices=["Automatic"] + available_patterns,
+            value="Automatic",
            label="Override Rhythm Pattern",
+            info="Leave on Automatic to use the pattern inferred from the analysis."
        )
        duration = gr.Slider(
            minimum=3, maximum=60, value=10, step=1,
@@ -254,18 +209,18 @@ def create_interface():
        # Add Examples
        gr.Examples(
            examples=[
+                ["I'm feeling anxious about my upcoming presentation.", None, 0, "sine", "Automatic", 10],
+                ["I feel grounded and peaceful today.", None, 0, "sine", "Automatic", 15],
+                ["I need to focus on deep work without distractions.", None, 0, "sine", "focused", 20],
+                ["I'm overwhelmed and need something steady.", None, 0, "sine", "Automatic", 10],
+                ["I'm excited and want a more energized soundscape.", None, 0, "pulse", "active", 10],
+                ["I want to relax after a long day.", None, 0, "sine", "relaxed", 30],
+                ["I'm feeling low and want something gentle.", None, 0, "sine", "Automatic", 15],
            ],
            inputs=[input_text, audio_input, override_freq, override_modulation, override_rhythm, duration],
            outputs=[analysis_output, audio_output, waveform_plot, waveform_simple, symbolic_output, transcription_output],
            fn=rhythma_experience,
            cache_examples=False
        )
 
        gr.Markdown("---")
@@ -282,10 +237,7 @@ def create_interface():
 # --- Run the Gradio App ---
 if __name__ == "__main__":
     if symphai_core is None:
+        LOGGER.error("Cannot launch Gradio app because RhythmaSymphAICore failed to initialize.")
     else:
-        print("\n🚀 Launching Rhythma Gradio Interface...")
         app_demo = create_interface()
-
-        # Set debug=True for more verbose logs during development
-        app_demo.launch()#debug=True)
+        app_demo.launch()
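
The two small helpers introduced in app.py above are easy to sanity-check in isolation; the self-contained sketch below copies their definitions and adds illustrative assertions.

```python
# Self-contained check of the two helpers introduced in app.py above.
def coerce_frequency(value):
    try:
        numeric_value = float(value)
    except (TypeError, ValueError):
        return 0.0
    return numeric_value if numeric_value > 0 else 0.0


def normalize_rhythm_override(value):
    if value in (None, "", "Automatic"):
        return None
    return value


assert coerce_frequency("432") == 432.0                   # numeric strings are accepted
assert coerce_frequency(None) == 0.0                      # missing override falls back to 0
assert coerce_frequency(-5) == 0.0                        # non-positive values are ignored
assert normalize_rhythm_override("Automatic") is None     # "Automatic" defers to the analysis
assert normalize_rhythm_override("focused") == "focused"  # explicit patterns pass through
```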
requirements.txt
@@ -3,11 +3,7 @@ groq
 soundfile
 numpy
 sentence-transformers
-scikit-learn
-pandas
 matplotlib
 pillow
-librosa
-SpeechRecognition
 scipy
-torch
+torch
rhythma.py
@@ -1,663 +1,8 @@
(663 lines removed: the original monolithic module defining RhythmaModulationEngine — emotional-frequency map, rhythm configs, symbolic mapping, sine/pulse/chirp modulation, save_audio, visualize_waveform, get_waveform_image, get_symbolic_interpretation, get_emotional_advice, get_complete_analysis — and RhythmaSymphAICore — optional Groq and SentenceTransformer initialization, detect_emotion_with_groq, get_closest_emotional_state, get_closest_rhythm_pattern, transcribe_audio, analyze_input. That logic now lives in rhythma_engine.py and rhythma_analysis.py.)
+from rhythma_analysis import AnalysisResult, RhythmaSymphAICore
+from rhythma_engine import RhythmaModulationEngine
+
+__all__ = [
+    "AnalysisResult",
+    "RhythmaModulationEngine",
+    "RhythmaSymphAICore",
+]
rhythma_analysis.py (new file)
@@ -0,0 +1,259 @@
import logging
import os
from dataclasses import asdict, dataclass

import numpy as np

from rhythma_engine import RhythmaModulationEngine

try:
    from groq import Groq

    GROQ_AVAILABLE = True
except ImportError:
    Groq = None
    GROQ_AVAILABLE = False


LOGGER = logging.getLogger(__name__)


@dataclass
class AnalysisResult:
    emotional_state: str = "neutral"
    rhythm_pattern: str = "calm"
    transcription: str = ""
    error: str | None = None

    def to_dict(self):
        return asdict(self)


def _cosine_similarity(left, right):
    denominator = np.linalg.norm(left) * np.linalg.norm(right)
    if denominator == 0:
        return -1.0
    return float(np.dot(left, right) / denominator)


class RhythmaSymphAICore:
    """
    Interprets text and audio input to determine emotional state and rhythm pattern.
    """

    def __init__(self, use_groq=True, use_embeddings=True):
        self.emotional_states = [
            "anxious",
            "stressed",
            "calm",
            "sad",
            "angry",
            "fearful",
            "confused",
            "happy",
            "neutral",
            "focused",
            "relaxed",
            "active",
        ]
        self.rhythm_patterns = list(RhythmaModulationEngine.RHYTHM_CONFIGS.keys())

        self.groq_client = None
        self.use_groq = use_groq and GROQ_AVAILABLE
        self.use_embeddings = use_embeddings
        self.embedding_model = None
        self.emotional_embeddings = {}
        self.rhythm_embeddings = {}
        self._embedding_init_attempted = False

        if self.use_groq:
            self._initialize_groq_client()

    def _initialize_groq_client(self):
        api_key = os.environ.get("GROQ_API_KEY")
        if not api_key:
            LOGGER.warning("GROQ_API_KEY not found. Groq features disabled.")
            self.use_groq = False
            return

        try:
            self.groq_client = Groq(api_key=api_key)
        except Exception:
            LOGGER.exception("Failed to initialize Groq client.")
            self.use_groq = False

    def _ensure_embeddings_loaded(self):
        if not self.use_embeddings or self._embedding_init_attempted:
            return

        self._embedding_init_attempted = True
        try:
            from sentence_transformers import SentenceTransformer

            self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
            self.emotional_embeddings = {
                state: self.embedding_model.encode([state])[0]
                for state in self.emotional_states
            }
            self.rhythm_embeddings = {
                pattern: self.embedding_model.encode([pattern])[0]
                for pattern in self.rhythm_patterns
            }
        except ImportError:
            LOGGER.info(
                "SentenceTransformer not installed. Falling back to keyword matching."
            )
            self.use_embeddings = False
        except Exception:
            LOGGER.exception("Failed to initialize SentenceTransformer embeddings.")
            self.use_embeddings = False
            self.embedding_model = None
            self.emotional_embeddings = {}
            self.rhythm_embeddings = {}

    def detect_emotion_with_groq(self, input_text):
        if not self.use_groq or not self.groq_client:
            return None

        prompt = (
            "Analyze the user's feeling described below.\n"
            "Identify the single MOST prominent emotional state or intention from the following list:\n"
            f"{', '.join(self.emotional_states)}\n"
            "Focus on the core feeling expressed. Respond with ONLY the chosen state/intention from the list.\n"
            f"User's feeling: \"{input_text}\"\n"
            "State/Intention:"
        )

        try:
            chat_completion = self.groq_client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                model="llama-3.3-70b-versatile",
                max_tokens=15,
                temperature=0.2,
                stop=["\n"],
            )
            detected_emotion = chat_completion.choices[0].message.content.strip().lower()
            if detected_emotion in self.emotional_states:
                return detected_emotion
            return self.get_closest_emotional_state(detected_emotion)
        except Exception:
            LOGGER.exception("Groq emotion detection failed.")
            return None

    def get_closest_emotional_state(self, input_text):
        if not input_text:
            return "neutral"

        input_text_lower = input_text.lower()
        words = set(input_text_lower.split())
        for state in self.emotional_states:
            if state in words or state in input_text_lower:
                return state

        self._ensure_embeddings_loaded()
        if self.embedding_model and self.emotional_embeddings:
            try:
                input_embedding = self.embedding_model.encode([input_text])[0]
                return max(
                    self.emotional_embeddings,
                    key=lambda state: _cosine_similarity(
                        input_embedding, self.emotional_embeddings[state]
                    ),
                )
            except Exception:
                LOGGER.exception("Semantic emotion matching failed.")

        return "neutral"

    def get_closest_rhythm_pattern(self, input_text=None, emotional_state=None):
        if emotional_state:
            mapping = {
                "anxious": "calm",
                "stressed": "relaxed",
                "calm": "calm",
                "sad": "relaxed",
                "angry": "active",
                "fearful": "calm",
                "confused": "focused",
                "happy": "active",
                "neutral": "calm",
                "focused": "focused",
                "relaxed": "relaxed",
                "active": "active",
            }
            return mapping.get(emotional_state, "calm")

        self._ensure_embeddings_loaded()
        if input_text and self.embedding_model and self.rhythm_embeddings:
            try:
                input_embedding = self.embedding_model.encode([input_text])[0]
                return max(
                    self.rhythm_embeddings,
                    key=lambda pattern: _cosine_similarity(
                        input_embedding, self.rhythm_embeddings[pattern]
                    ),
                )
            except Exception:
                LOGGER.exception("Semantic rhythm matching failed.")

        return "calm"

    def transcribe_audio(self, audio_path):
        if not self.use_groq or not self.groq_client:
            return None, "Transcription disabled: Groq client not available or API key missing."

        if not audio_path or not os.path.exists(audio_path):
            return None, "Transcription failed: Audio file path is invalid or missing."

        try:
            with open(audio_path, "rb") as audio_file:
                response = self.groq_client.audio.transcriptions.create(
                    file=(os.path.basename(audio_path), audio_file.read()),
                    model="whisper-large-v3",
                    response_format="json",
                )
            return response.text, None
        except Exception as exc:
            LOGGER.exception("Groq transcription failed.")
            return None, f"Error during Groq transcription: {exc}"

    def analyze_input(self, input_text=None, audio_path=None):
        result = AnalysisResult()
        text_to_analyze = None

        try:
            if audio_path and self.use_groq:
                transcribed_text, transcription_error = self.transcribe_audio(audio_path)
                if transcription_error:
                    result.error = transcription_error
                    result.transcription = f"[Transcription Error: {transcription_error}]"
                elif transcribed_text:
                    result.transcription = transcribed_text
                    text_to_analyze = transcribed_text

            if not text_to_analyze and input_text:
                text_to_analyze = input_text

            if text_to_analyze:
                detected_emotion = None
                if self.use_groq:
                    detected_emotion = self.detect_emotion_with_groq(text_to_analyze)

                result.emotional_state = detected_emotion or self.get_closest_emotional_state(
                    text_to_analyze
                )
            else:
                result.emotional_state = "neutral"

            result.rhythm_pattern = self.get_closest_rhythm_pattern(
                input_text=text_to_analyze,
                emotional_state=result.emotional_state,
            )
        except Exception as exc:
            LOGGER.exception("Unexpected error during input analysis.")
            result = AnalysisResult(
                transcription=result.transcription,
                error=f"Unexpected error during input analysis: {exc}",
            )

        return result.to_dict()
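With Groq and embeddings both switched off, `analyze_input` relies only on keyword matching plus the fixed emotion-to-rhythm table, so nothing heavyweight is imported or downloaded. A short sketch of that offline path (the example sentence is illustrative):

```python
from rhythma_analysis import RhythmaSymphAICore

# No GROQ_API_KEY and no sentence-transformers download are needed on this path.
core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)

result = core.analyze_input("I feel anxious before the demo")
assert result["emotional_state"] == "anxious"   # keyword match
assert result["rhythm_pattern"] == "calm"       # "anxious" -> "calm" in the mapping table
assert result["error"] is None
```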
rhythma_engine.py (new file)
@@ -0,0 +1,377 @@
import io
import logging

import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy import signal

try:
    import soundfile as sf

    SOUNDFILE_AVAILABLE = True
except ImportError:
    sf = None
    SOUNDFILE_AVAILABLE = False


LOGGER = logging.getLogger(__name__)


class RhythmaModulationEngine:
    """
    Dynamic rhythm-based audio modulation engine.
    """

    SAMPLE_RATE = 44100
    EMOTIONAL_FREQUENCIES = {
        "anxious": 396,
        "stressed": 528,
        "calm": 741,
        "sad": 417,
        "angry": 852,
        "fearful": 639,
        "confused": 285,
        "happy": 432,
        "neutral": 440,
        "focused": 639,
        "relaxed": 741,
        "active": 528,
    }
    EMOTIONAL_INFO = {
        "anxious": {
            "name": "Liberating Guilt and Fear",
            "advice": "The 396 Hz frequency may help release fear and guilt.",
        },
        "stressed": {
            "name": "Transformation and Miracles",
            "advice": "The 528 Hz frequency is associated with transformation.",
        },
        "calm": {
            "name": "Awakening Intuition",
            "advice": "The 741 Hz frequency is associated with awakening intuition.",
        },
        "sad": {
            "name": "Facilitating Change",
            "advice": "The 417 Hz frequency is linked to facilitating change.",
        },
        "angry": {
            "name": "Returning to Spiritual Order",
            "advice": "The 852 Hz frequency may aid in returning to inner strength.",
        },
        "fearful": {
            "name": "Connecting Relationships",
            "advice": "The 639 Hz frequency is associated with connecting relationships.",
        },
        "confused": {
            "name": "Quantum Cognition",
            "advice": "The 285 Hz frequency is believed to influence energy fields.",
        },
        "happy": {
            "name": "Harmonizing Vibrations",
            "advice": "The 432 Hz frequency is associated with natural harmony.",
        },
        "neutral": {
            "name": "Grounded Presence",
            "advice": "The 440 Hz frequency provides a stable reference point.",
        },
        "focused": {
            "name": "Clarity and Connection",
            "advice": "The 639 Hz frequency may support focus and understanding.",
        },
        "relaxed": {
            "name": "Intuitive Calm",
            "advice": "The 741 Hz frequency is linked to intuitive states and problem-solving.",
        },
        "active": {
            "name": "Dynamic Energy",
            "advice": "The 528 Hz frequency is associated with positive transformation.",
        },
    }
    RHYTHM_CONFIGS = {
        "calm": {
            "mod_depth": 0.15,
            "mod_freq": 0.5,
            "pulse_width": 0.7,
            "phase_shift": 0.1,
            "harmonics": [1.0, 0.5, 0.25, 0.125],
        },
        "active": {
            "mod_depth": 0.4,
            "mod_freq": 2.5,
            "pulse_width": 0.3,
            "phase_shift": 0.3,
            "harmonics": [1.0, 0.7, 0.5, 0.3],
        },
        "focused": {
            "mod_depth": 0.25,
            "mod_freq": 1.5,
            "pulse_width": 0.5,
            "phase_shift": 0.2,
            "harmonics": [1.0, 0.6, 0.3, 0.15],
        },
        "relaxed": {
            "mod_depth": 0.2,
            "mod_freq": 0.3,
            "pulse_width": 0.8,
            "phase_shift": 0.05,
            "harmonics": [1.0, 0.4, 0.2, 0.1],
        },
    }
    SYMBOLIC_MAPPING = {
        "calm": "Resonating in the Circle Archetype: completion, wholeness, presence",
        "active": "Resonating in the Spiral Archetype: flow, transition, emergence",
        "focused": "Resonating in the Triangle Archetype: clarity, direction, purpose",
        "relaxed": "Resonating in the Wave Archetype: fluidity, acceptance, surrender",
    }

    def __init__(
        self,
        base_freq=None,
        modulation_type="sine",
        rhythm_pattern=None,
        emotional_state=None,
    ):
        self.modulation_type = modulation_type
        self.sample_rate = self.SAMPLE_RATE
        self.emotional_frequencies = self.EMOTIONAL_FREQUENCIES
        self.emotional_info = self.EMOTIONAL_INFO
        self.rhythm_configs = self.RHYTHM_CONFIGS
        self.symbolic_mapping = self.SYMBOLIC_MAPPING

        valid_emotional_state = (
            emotional_state
            if emotional_state and emotional_state in self.emotional_frequencies
            else None
        )
        self.emotional_state = valid_emotional_state

        if self.emotional_state:
            self.base_freq = self.emotional_frequencies[self.emotional_state]
        elif base_freq and base_freq > 0:
            self.base_freq = base_freq
            self.emotional_state = self._find_closest_state(base_freq)
        else:
            self.emotional_state = "neutral"
            self.base_freq = self.emotional_frequencies[self.emotional_state]

        valid_rhythm_pattern = (
            rhythm_pattern if rhythm_pattern and rhythm_pattern in self.rhythm_configs else None
        )
        self.rhythm_pattern = valid_rhythm_pattern or "calm"
        self.config = self.rhythm_configs[self.rhythm_pattern]

    def _find_closest_state(self, base_freq):
        min_diff = float("inf")
        closest_state = None
        for state, freq in self.emotional_frequencies.items():
            diff = abs(freq - base_freq)
            if diff < min_diff:
                min_diff = diff
                closest_state = state
        return closest_state if min_diff <= 10 else None

    def _generate_base_wave(self, duration):
        t = np.linspace(0, duration, int(self.sample_rate * duration), endpoint=False)
        base_wave = np.sin(2 * np.pi * self.base_freq * t)

        rich_wave = np.zeros_like(base_wave)
        for index, harmonic_amp in enumerate(self.config["harmonics"], start=1):
            harmonic_freq = self.base_freq * index
            if harmonic_freq < self.sample_rate / 2:
                rich_wave += harmonic_amp * np.sin(2 * np.pi * harmonic_freq * t)

        if np.max(np.abs(rich_wave)) > 0:
            rich_wave = rich_wave / np.max(np.abs(rich_wave))
        else:
            rich_wave = base_wave

        return t, rich_wave

    def _apply_sine_modulation(self, t, carrier):
        mod_env = 1.0 + self.config["mod_depth"] * np.sin(
            2 * np.pi * self.config["mod_freq"] * t + self.config["phase_shift"]
        )
        return carrier * mod_env

    def _apply_pulse_modulation(self, t, carrier):
        pulse = 0.5 * (
            signal.square(
                2 * np.pi * self.config["mod_freq"] * t,
                duty=self.config["pulse_width"],
            )
            + 1
        )
        mod_env = 1.0 - self.config["mod_depth"] + self.config["mod_depth"] * pulse
        return carrier * mod_env

    def _apply_chirp_modulation(self, t, carrier):
        start_mod_freq = max(0.1, self.config["mod_freq"] / 2)
        end_mod_freq = self.config["mod_freq"] * 2
        instantaneous_mod_freq = np.linspace(start_mod_freq, end_mod_freq, len(t))
        phase = 2 * np.pi * np.cumsum(instantaneous_mod_freq) / self.sample_rate
        mod_env = 1.0 + self.config["mod_depth"] * np.sin(
            phase + self.config["phase_shift"]
        )
        return carrier * mod_env

    def generate_modulated_wave(self, duration):
        t, base_carrier = self._generate_base_wave(duration)

        if self.modulation_type == "sine":
            modulated = self._apply_sine_modulation(t, base_carrier)
        elif self.modulation_type == "pulse":
            modulated = self._apply_pulse_modulation(t, base_carrier)
        elif self.modulation_type == "chirp":
            modulated = self._apply_chirp_modulation(t, base_carrier)
        else:
            modulated = base_carrier

        max_amp = np.max(np.abs(modulated))
        if max_amp <= 0:
            return modulated
        return 0.9 * modulated / max_amp

    def save_audio(self, duration, file_path=None):
        if not SOUNDFILE_AVAILABLE:
            LOGGER.error("soundfile is not installed; audio export is unavailable.")
            return None

        audio = self.generate_modulated_wave(duration)
        output_path = file_path or f"rhythma_{self.base_freq}Hz_{self.rhythm_pattern}.wav"
        try:
            sf.write(output_path, audio, self.sample_rate)
            LOGGER.info("Audio saved to %s", output_path)
            return output_path
        except Exception:
            LOGGER.exception("Failed to save audio to %s", output_path)
            return None

    def visualize_waveform(self, duration):
        vis_duration = min(duration, 0.5)
        plot_samples = int(self.sample_rate * vis_duration)
        t = np.linspace(0, vis_duration, plot_samples, endpoint=False)
        modulated = self.generate_modulated_wave(vis_duration)

        fig, (ax1, ax2) = plt.subplots(
            2, 1, figsize=(10, 6), gridspec_kw={"height_ratios": [1, 1]}
        )

        zoom_samples = min(plot_samples, 2000)
        ax1.plot(t[:zoom_samples], modulated[:zoom_samples])
        title = (
            f"Rhythma Waveform: {self.rhythm_pattern.capitalize()} "
            f"({self.modulation_type.capitalize()})"
        )
        if self.emotional_state:
            title += f" - {self.emotional_state.capitalize()} ({self.base_freq} Hz)"
        else:
            title += f" - {self.base_freq} Hz"
        ax1.set_title(title)
        ax1.set_xlabel("Time (s)")
        ax1.set_ylabel("Amplitude")
        ax1.grid(True)

        try:
            full_wave = self.generate_modulated_wave(duration)
            freqs, times, spectrogram = signal.spectrogram(
                full_wave, self.sample_rate, nperseg=1024
            )
            freq_limit_idx = np.where(freqs >= 2000)[0]
            if len(freq_limit_idx) > 0:
                cutoff = freq_limit_idx[0]
                freqs = freqs[:cutoff]
                spectrogram = spectrogram[:cutoff, :]

            pcm = ax2.pcolormesh(
                times,
                freqs,
                10 * np.log10(spectrogram + 1e-9),
                shading="gouraud",
                cmap="viridis",
            )
            fig.colorbar(pcm, ax=ax2, label="Power (dB)")
            ax2.set_ylabel("Frequency (Hz)")
            ax2.set_xlabel("Time (s)")
            ax2.set_title("Spectrogram")
        except Exception:
            LOGGER.exception("Failed to generate spectrogram.")
            ax2.set_title("Spectrogram (Error)")
            ax2.text(
                0.5,
                0.5,
                "Could not generate spectrogram",
                horizontalalignment="center",
                verticalalignment="center",
                transform=ax2.transAxes,
            )

        plt.tight_layout(rect=[0, 0.05, 1, 1])

        fig_text = self.get_symbolic_interpretation()
        emotion_info = self.emotional_info.get(self.emotional_state, {})
        if emotion_info:
            fig_text += (
                f"\n{self.base_freq} Hz - {emotion_info.get('name', '')}: "
                f"{emotion_info.get('advice', '')}"
            )
        elif not self.emotional_state:
            fig_text += f"\nBase Frequency: {self.base_freq} Hz"

        fig.text(0.5, 0.01, fig_text, ha="center", va="bottom", fontsize=9, style="italic", wrap=True)
        return fig

    def get_waveform_image(self):
        duration = 0.05
        t = np.linspace(0, duration, int(self.sample_rate * duration), False)
        tone = np.sin(2 * np.pi * self.base_freq * t)

        plt.figure(figsize=(6, 2))
        plt.plot(t, tone)
        plt.xlabel("Time (s)")
        plt.ylabel("Amplitude")
        plt.ylim(-1.1, 1.1)
        plt.grid(True)
        plt.tight_layout()

        buffer = io.BytesIO()
        plt.savefig(buffer, format="png", bbox_inches="tight")
        buffer.seek(0)
        plt.close()
        return Image.open(buffer)

    def get_symbolic_interpretation(self):
        return self.symbolic_mapping.get(
            self.rhythm_pattern, "Pattern Interpretation: Default"
        )

    def get_emotional_advice(self):
        if not self.emotional_state:
            return "No specific emotional state identified."
        return self.emotional_info.get(self.emotional_state, {}).get(
            "advice", "General well-being advice applies."
        )

    def get_complete_analysis(self):
        analysis = []

        if self.emotional_state:
            emotion_info = self.emotional_info.get(self.emotional_state, {})
            analysis.append(f"Detected State/Intention: {self.emotional_state.capitalize()}")
            analysis.append(
                f"Resonant Frequency: {self.base_freq} Hz - "
                f"{emotion_info.get('name', 'Frequency Information')}"
            )
            analysis.append(
                f"Guidance: {emotion_info.get('advice', 'Focus on the sound.')}"
            )
        else:
            analysis.append(f"Using Manual Frequency: {self.base_freq} Hz")
            analysis.append("Guidance: Tune into the custom frequency.")

        analysis.append(f"Rhythm Pattern: {self.rhythm_pattern.capitalize()}")
        analysis.append(
            f"Symbolic Interpretation: {self.get_symbolic_interpretation()}"
        )
        analysis.append(f"Modulation Type: {self.modulation_type.capitalize()}")
        return "\n\n".join(analysis)
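The engine can also be driven directly, without the Gradio layer. A minimal sketch, assuming soundfile is installed for the export step; the output filename here is illustrative:

```python
from rhythma_engine import RhythmaModulationEngine

engine = RhythmaModulationEngine(emotional_state="stressed", modulation_type="pulse")

wave = engine.generate_modulated_wave(2.0)           # 88200 samples at 44.1 kHz, peak <= 0.9
path = engine.save_audio(2.0, "stressed_pulse.wav")  # returns None if soundfile is missing
print(engine.get_complete_analysis())
```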
tests/test_rhythma_regression.py (new file)
@@ -0,0 +1,34 @@
import numpy as np
from rhythma import RhythmaModulationEngine, RhythmaSymphAICore


def test_analyze_input_maps_stressed_text_to_relaxed_pattern():
    core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)

    result = core.analyze_input("feeling stressed about work")

    assert result["emotional_state"] == "stressed"
    assert result["rhythm_pattern"] == "relaxed"
    assert result["transcription"] == ""
    assert result["error"] is None


def test_analyze_input_defaults_to_neutral_when_no_text_is_provided():
    core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)

    result = core.analyze_input("")

    assert result["emotional_state"] == "neutral"
    assert result["rhythm_pattern"] == "calm"
    assert result["transcription"] == ""
    assert result["error"] is None


def test_generate_modulated_wave_has_expected_length_and_headroom():
    engine = RhythmaModulationEngine(emotional_state="stressed")

    wave = engine.generate_modulated_wave(0.1)

    assert len(wave) == 4410
    assert np.max(np.abs(wave)) <= 0.9 + 1e-9
    assert engine.get_symbolic_interpretation().startswith("Resonating in the Circle")