# ciaochris's picture
# Update app.py
# 2b9da54 verified
import os
import logging

# gradio is optional at import time so the analysis helpers remain usable
# in environments (e.g. CI) where the UI stack is not installed.
try:
    import gradio as gr
except ModuleNotFoundError:  # pragma: no cover - local test environments may omit gradio
    gr = None

import matplotlib

# Headless backend: figures render without a display server.
matplotlib.use('Agg')

import tempfile
import time

from rhythma import RhythmaModulationEngine, RhythmaSymphAICore
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)

# --- Environment Variable Check ---
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
use_groq = bool(GROQ_API_KEY)  # True only if key exists and is not empty
if not use_groq:
    LOGGER.warning(
        "GROQ_API_KEY not found. Advanced LLM analysis and transcription are disabled."
    )
else:
    LOGGER.info("GROQ_API_KEY found. Enabling Groq features.")
# --- End Environment Variable Check ---

# --- Initialize Core Components ---
try:
    symphai_core = RhythmaSymphAICore(use_groq=use_groq)
except Exception as e:
    # A failed core leaves the app in degraded mode; every handler that
    # depends on it checks `symphai_core is None` before use.
    LOGGER.exception("Could not initialize RhythmaSymphAICore: %s", e)
    symphai_core = None
# --- End Initialization ---
# --- Core Functions ---
def analyze_input(input_text=None, audio_input=None):
    """Delegate analysis of the user's text/voice input to the SymphAI core.

    Returns the core's analysis dict, or an error dict when the core
    failed to initialize at import time.
    """
    if symphai_core is None:
        return {"error": "Analysis Core failed to initialize."}
    if isinstance(audio_input, str):
        audio_filepath = audio_input
    else:
        # Non-path audio payloads (or None) are ignored by the core.
        audio_filepath = None
    text = input_text if input_text else ""
    return symphai_core.analyze_input(text, audio_filepath)
def generate_modulated_experience(analysis_result, base_freq=None, modulation_type="sine", rhythm_pattern=None, duration=5):
    """Render a modulated audio session from an analysis result.

    Returns a 5-tuple ``(analysis_text, audio_path, figure, waveform_image,
    symbolic_text)``.  On any failure the first element carries an error
    message and the remaining four are None.
    """
    if not isinstance(analysis_result, dict):
        error_msg = "Internal error: analysis result is not in the expected format."
        LOGGER.error(error_msg)
        return error_msg, None, None, None, None
    if analysis_result.get("error"):
        error_msg = f"Analysis Error: {analysis_result['error']}"
        LOGGER.error(error_msg)
        return error_msg, None, None, None, None
    emotional_state = analysis_result.get("emotional_state", "neutral")
    session_profile = build_runtime_session_profile(
        analysis_result,
        base_freq=base_freq,
        modulation_type=modulation_type,
        rhythm_pattern=rhythm_pattern,
    )
    # Precedence: profile value, then explicit UI override, then analysis
    # hint, then the "calm" default.
    final_rhythm_pattern = (
        session_profile.get("pattern")
        or rhythm_pattern
        or analysis_result.get("rhythm_pattern")
        or "calm"
    )
    final_modulation_type = session_profile.get("modulation_type") or modulation_type
    final_base_freq = session_profile.get("tone_center")
    if not final_base_freq:
        # A 0/negative/None override counts as "unset" so the engine may choose.
        final_base_freq = base_freq if base_freq and base_freq > 0 else None
    try:
        engine = RhythmaModulationEngine(
            base_freq=final_base_freq,
            modulation_type=final_modulation_type,
            rhythm_pattern=final_rhythm_pattern,
            # Only let emotional state drive the tone when no explicit
            # tone center was resolved above.
            emotional_state=emotional_state if not final_base_freq else None,
        )
        if session_profile:
            # Route wave generation through the session profile.  Default
            # arguments snapshot the engine and a copy of the profile so
            # later rebinding/mutation cannot leak into the lambda.
            engine.generate_modulated_wave = (
                lambda requested_duration, _engine=engine, _profile=dict(session_profile):
                _engine.render_session(_profile, requested_duration)
            )
        timestamp = int(time.time())
        temp_dir = tempfile.gettempdir()
        os.makedirs(temp_dir, exist_ok=True)
        audio_file = os.path.join(temp_dir, f"rhythma_{timestamp}.wav")
        saved_audio_path = engine.save_audio(duration, audio_file)
        if not saved_audio_path:
            raise RuntimeError("Failed to save generated audio file.")
        fig = engine.visualize_waveform(duration)
        waveform_image = engine.get_waveform_image()
        if session_profile:
            # Profile-driven copy; falls back to engine-generated text below.
            analysis_text = "\n\n".join(
                [
                    f"Session: {session_profile.get('title', 'Session')}",
                    f"Tone: {session_profile.get('emotional_tone', emotional_state.title())}",
                    f"Listening Path: {session_profile.get('guidance', '')}",
                ]
            )
            symbolic = session_profile.get("reflection") or engine.get_symbolic_interpretation()
        else:
            analysis_text = engine.get_complete_analysis()
            symbolic = engine.get_symbolic_interpretation()
        return analysis_text, saved_audio_path, fig, waveform_image, symbolic
    except Exception as e:
        error_msg = f"Error during Rhythma generation: {e}"
        LOGGER.exception(error_msg)
        return error_msg, None, None, None, None
def coerce_frequency(value):
    """Best-effort conversion of a UI value to a positive frequency (Hz).

    Non-numeric input and non-positive numbers (including NaN) collapse
    to 0.0, which downstream code treats as "no override".
    """
    try:
        frequency = float(value)
    except (TypeError, ValueError):
        return 0.0
    if frequency > 0:
        return frequency
    return 0.0
def normalize_rhythm_override(value):
    """Treat the UI's 'Automatic' choice (and blank values) as no override."""
    if value is None or value == "" or value == "Automatic":
        return None
    return value
# Pre-baked rows for gr.Examples. Column order matches the click handler's
# inputs: [text, audio, tone_center, texture, pattern, duration];
# None/"Automatic" defer to the session profile inferred from the text.
SESSION_EXAMPLES = [
    ["I need something steady before a difficult conversation.", None, None, "sine", "Automatic", 12],
    ["I want to feel grounded and open as the evening slows down.", None, None, "sine", "Automatic", 18],
    ["I need a clear horizon for deep work.", None, None, "sine", "focused", 20],
    ["Everything feels loud and I want a softer landing.", None, None, "sine", "Automatic", 14],
    ["I feel bright and want a livelier pulse without losing calm.", None, None, "pulse", "active", 10],
    ["Give me a long unwind after a heavy day.", None, None, "sine", "relaxed", 30],
    ["I want a gentle session for a low-energy morning.", None, None, "sine", "Automatic", 16],
]
def build_runtime_session_profile(
    analysis_result,
    base_freq=None,
    modulation_type="sine",
    rhythm_pattern=None,
):
    """Merge UI overrides into the analysis' session profile.

    Prefers the core's own override logic when the core is available;
    otherwise applies the overrides to a local copy. Returns an empty
    dict when the analysis carries no session profile.
    """
    if not isinstance(analysis_result, dict):
        analysis_result = {}
    profile = analysis_result.get("session_profile") or {}
    if not profile:
        return {}
    if symphai_core is not None:
        return symphai_core.apply_profile_overrides(
            profile,
            tone_center=base_freq,
            modulation_type=modulation_type,
            session_pattern=rhythm_pattern,
        )
    # Fallback path: shape a copy locally when the core failed to initialize.
    shaped = dict(profile)
    if base_freq is not None and base_freq > 0:
        shaped["tone_center"] = base_freq
    if modulation_type:
        shaped["modulation_type"] = modulation_type
    if rhythm_pattern:
        shaped["pattern"] = rhythm_pattern
    return shaped
def build_session_copy(analysis_result):
    """Derive the user-facing session copy (name, tone, path, reflection).

    Missing profile fields fall back to gentle defaults so the UI always
    has something coherent to show.
    """
    if not isinstance(analysis_result, dict):
        analysis_result = {}
    profile = analysis_result.get("session_profile") or {}
    emotional_state = analysis_result.get("emotional_state", "neutral")

    defaults = {
        "title": f"{emotional_state.title()} Session",
        "emotional_tone": "Measured and receptive",
        "guidance": "Stay with the pulse until your breath settles into its own pace.",
        "reflection": "This session offers a gentle reset without pushing for intensity.",
    }
    session_name = profile.get("title") or defaults["title"]
    emotional_tone = profile.get("emotional_tone") or defaults["emotional_tone"]
    guidance = profile.get("guidance") or defaults["guidance"]
    reflection = profile.get("reflection") or defaults["reflection"]

    return {
        "session_name": session_name,
        "emotional_tone": emotional_tone,
        # Tone and guidance are fused into one sentence pair for the path box.
        "listening_path": f"{emotional_tone}. {guidance}",
        "session_reflection": reflection,
        "tone_center": profile.get("tone_center"),
        "session_pattern": profile.get("pattern"),
    }
def rhythma_experience(
    input_text, audio_input,
    override_freq=None,
    override_modulation="sine",
    override_rhythm=None,
    duration=5
):
    """Top-level Gradio handler: analyze input, render audio, assemble outputs.

    Returns the 8-tuple wired to the UI: (session heading markdown, tone,
    listening path, audio path, plot, waveform image, reflection,
    transcription).
    """
    cleaned_text = input_text.strip() if input_text else ""
    freq_override = coerce_frequency(override_freq)
    analysis = analyze_input(cleaned_text, audio_input)
    session_copy = build_session_copy(analysis)

    analysis_text, audio_file, fig, waveform_image, symbolic = generate_modulated_experience(
        analysis,
        base_freq=freq_override,
        modulation_type=override_modulation,
        rhythm_pattern=normalize_rhythm_override(override_rhythm),
        duration=duration,
    )

    if isinstance(analysis, dict):
        transcription = analysis.get("transcription", "")
    else:
        transcription = ""

    listening_path = session_copy["listening_path"]
    session_reflection = session_copy["session_reflection"]
    # When generation failed (no audio), analysis_text carries the error
    # message; surface it in both the path and reflection slots.
    if audio_file is None and isinstance(analysis_text, str) and analysis_text:
        fallback = analysis_text or symbolic or "Session generation is unavailable right now."
        listening_path = fallback
        session_reflection = fallback

    return (
        f"### {session_copy['session_name']}",
        session_copy["emotional_tone"],
        listening_path,
        audio_file,
        fig if fig else None,
        waveform_image,
        session_reflection,
        transcription,
    )
# --- Create the Gradio Interface ---
def create_interface():
    """Build the Gradio Blocks UI for Rhythma.

    Raises:
        ModuleNotFoundError: when gradio is not installed.
    """
    if gr is None:
        raise ModuleNotFoundError("gradio is required to create the Rhythma interface.")
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="rose", secondary_hue="stone"), title="Rhythma") as demo:
        gr.Markdown("# Rhythma")
        gr.Markdown("### An artful wellness companion for reflective listening.")
        if not use_groq:
            gr.Warning(
                "Groq analysis is unavailable. Text-led sessions still work, but live voice transcription is off."
            )
        with gr.Row():
            # Left column: all user inputs and shaping controls.
            with gr.Column(scale=1):
                gr.Markdown("**1. Share what you're carrying**")
                input_text = gr.Textbox(
                    label="How are you feeling, or what intention would you like to hold?",
                    placeholder="e.g., 'I need something steady before a conversation' or 'I need room to chill after a long day.'",
                    lines=4
                )
                gr.Markdown("**Optional: add a voice note (requires Groq)**")
                # Voice capture is disabled entirely when Groq is unavailable.
                audio_input = gr.Audio(
                    sources=["microphone"],
                    type="filepath",
                    label="Record or Upload a Voice Note" if use_groq else "Voice Note (Disabled)",
                    interactive=use_groq
                )
                with gr.Accordion("Session shaping controls", open=False):
                    override_freq = gr.Number(
                        value=None,
                        minimum=0,
                        precision=0,
                        label="Tone Center (Hz)",
                        placeholder="Automatic",
                        info="Leave blank to let Rhythma choose a tone center from your session profile."
                    )
                    override_modulation = gr.Dropdown(
                        choices=["sine", "pulse", "chirp"],
                        value="sine",
                        label="Texture Shape"
                    )
                    # Pull valid pattern names straight from the engine so the
                    # dropdown stays in sync with what the engine supports.
                    available_patterns = list(RhythmaModulationEngine().rhythm_configs.keys())
                    override_rhythm = gr.Dropdown(
                        choices=["Automatic"] + available_patterns,
                        value="Automatic",
                        label="Session Pattern",
                        info="Leave on Automatic to follow the pattern inferred from your session profile."
                    )
                    duration = gr.Slider(
                        minimum=3, maximum=60, value=10, step=1,
                        label="Session Length (seconds)"
                    )
                generate_button = gr.Button("Begin Exploration", variant="primary", scale=2)
            # Right column: session outputs.
            with gr.Column(scale=2):
                gr.Markdown("**2. Receive your listening session**")
                gr.Markdown(
                    "_Rhythma shapes a named listening path, then renders the audio, reflection, and waveform around it._"
                )
                session_name_output = gr.Markdown("### Session")
                emotional_tone_output = gr.Markdown("Measured and receptive")
                listening_path_output = gr.Textbox(label="Your Listening Path", lines=6, interactive=False)
                with gr.Row():
                    audio_output = gr.Audio(label="Session Audio", type="filepath", interactive=False)
                    waveform_simple = gr.Image(label="Tone Center", interactive=False, height=100, width=200)
                waveform_plot = gr.Plot(label="Session Pattern")
                symbolic_output = gr.Textbox(label="Session Reflection", interactive=False)
                transcription_output = gr.Textbox(
                    label="Transcribed Voice Note",
                    interactive=False,
                    visible=use_groq
                )
        # Output order must match rhythma_experience's return tuple.
        generate_button.click(
            fn=rhythma_experience,
            inputs=[
                input_text, audio_input,
                override_freq, override_modulation, override_rhythm,
                duration
            ],
            outputs=[
                session_name_output, emotional_tone_output, listening_path_output,
                audio_output, waveform_plot, waveform_simple, symbolic_output,
                transcription_output
            ]
        )
        gr.Examples(
            examples=SESSION_EXAMPLES,
            inputs=[input_text, audio_input, override_freq, override_modulation, override_rhythm, duration],
            outputs=[
                session_name_output, emotional_tone_output, listening_path_output,
                audio_output, waveform_plot, waveform_simple, symbolic_output,
                transcription_output
            ],
            fn=rhythma_experience,
            cache_examples=False
        )
        gr.Markdown("---")
        gr.Markdown("""
## About Rhythma
Rhythma is an artful wellness companion that turns a felt state into a reflective listening session.
It uses optional AI analysis, session profiling, and rhythmic sound design to shape a tone center, pattern, and guided path for the moment you are in.
**Note:** Rhythma is for reflective listening and personal wellbeing rituals. It is not medical advice or a clinical treatment.
© 2026 Vers3Dynamics
""")
    return demo
# --- Run the Gradio App ---
if __name__ == "__main__":
    # Refuse to launch when either the analysis core or the UI stack is
    # unavailable; both degraded states are detected at import time above.
    if symphai_core is None:
        LOGGER.error("Cannot launch Gradio app because RhythmaSymphAICore failed to initialize.")
    elif gr is None:
        LOGGER.error("Cannot launch Gradio app because gradio is not installed.")
    else:
        app_demo = create_interface()
        app_demo.launch()