import os
import logging
try:
import gradio as gr
except ModuleNotFoundError: # pragma: no cover - local test environments may omit gradio
gr = None
import matplotlib
matplotlib.use('Agg')
import tempfile
import time
from rhythma import RhythmaModulationEngine, RhythmaSymphAICore
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# --- Environment Variable Check ---
# Groq-backed features (advanced LLM analysis and voice transcription) are
# gated on a non-empty GROQ_API_KEY in the environment.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
use_groq = bool(GROQ_API_KEY) # True only if key exists and is not empty
if not use_groq:
    LOGGER.warning(
        "GROQ_API_KEY not found. Advanced LLM analysis and transcription are disabled."
    )
else:
    LOGGER.info("GROQ_API_KEY found. Enabling Groq features.")
# --- End Environment Variable Check ---
# --- Initialize Core Components ---
# Shared analysis core for the whole app. On any init failure it is set to
# None, and every entry point checks for None before using it.
try:
    symphai_core = RhythmaSymphAICore(use_groq=use_groq)
except Exception as e:
    LOGGER.exception("Could not initialize RhythmaSymphAICore: %s", e)
    symphai_core = None
# --- End Initialization ---
# --- Core Functions ---
def analyze_input(input_text=None, audio_input=None):
    """Delegate analysis of text (and an optional audio filepath) to the core.

    Returns the core's analysis dict, or an error dict when the shared
    core never initialized. Non-string audio inputs are dropped — only a
    filepath string is forwarded.
    """
    if symphai_core is None:
        return {"error": "Analysis Core failed to initialize."}
    filepath = audio_input if isinstance(audio_input, str) else None
    text = input_text or ""
    return symphai_core.analyze_input(text, filepath)
def generate_modulated_experience(analysis_result, base_freq=None, modulation_type="sine", rhythm_pattern=None, duration=5):
    """Render a modulated audio session from an analysis result.

    Args:
        analysis_result: dict from analyze_input(); may carry "error",
            "emotional_state", "rhythm_pattern", and "session_profile" keys.
        base_freq: optional user tone-center override in Hz (0/None = auto).
        modulation_type: waveform texture name (e.g. "sine").
        rhythm_pattern: optional pattern override; None follows the profile.
        duration: requested session length in seconds.

    Returns:
        (analysis_text, audio_path, figure, waveform_image, symbolic);
        on any failure the first element is an error message string and
        the remaining four are None.
    """
    if not isinstance(analysis_result, dict):
        error_msg = "Internal error: analysis result is not in the expected format."
        LOGGER.error(error_msg)
        return error_msg, None, None, None, None
    if analysis_result.get("error"):
        error_msg = f"Analysis Error: {analysis_result['error']}"
        LOGGER.error(error_msg)
        return error_msg, None, None, None, None
    emotional_state = analysis_result.get("emotional_state", "neutral")
    session_profile = build_runtime_session_profile(
        analysis_result,
        base_freq=base_freq,
        modulation_type=modulation_type,
        rhythm_pattern=rhythm_pattern,
    )
    # Pattern resolution order: profile > UI override > analysis hint > "calm".
    final_rhythm_pattern = (
        session_profile.get("pattern")
        or rhythm_pattern
        or analysis_result.get("rhythm_pattern")
        or "calm"
    )
    final_modulation_type = session_profile.get("modulation_type") or modulation_type
    final_base_freq = session_profile.get("tone_center")
    if not final_base_freq:
        final_base_freq = base_freq if base_freq and base_freq > 0 else None
    try:
        engine = RhythmaModulationEngine(
            base_freq=final_base_freq,
            modulation_type=final_modulation_type,
            rhythm_pattern=final_rhythm_pattern,
            # Only let the engine derive a tone from emotion when no explicit
            # frequency was resolved above.
            emotional_state=emotional_state if not final_base_freq else None,
        )
        if session_profile:
            # Monkey-patch the engine's wave generator so downstream calls
            # render the full session profile instead of a plain tone.
            # NOTE(review): assumes save_audio()/visualize_waveform() route
            # through generate_modulated_wave — confirm in the engine class.
            engine.generate_modulated_wave = (
                lambda requested_duration, _engine=engine, _profile=dict(session_profile):
                _engine.render_session(_profile, requested_duration)
            )
        # Timestamped output path in the system temp dir; collisions are only
        # possible within the same second.
        timestamp = int(time.time())
        temp_dir = tempfile.gettempdir()
        os.makedirs(temp_dir, exist_ok=True)
        audio_file = os.path.join(temp_dir, f"rhythma_{timestamp}.wav")
        saved_audio_path = engine.save_audio(duration, audio_file)
        if not saved_audio_path:
            raise RuntimeError("Failed to save generated audio file.")
        fig = engine.visualize_waveform(duration)
        waveform_image = engine.get_waveform_image()
        if session_profile:
            # Profile-driven copy; fall back to the engine's own text otherwise.
            analysis_text = "\n\n".join(
                [
                    f"Session: {session_profile.get('title', 'Session')}",
                    f"Tone: {session_profile.get('emotional_tone', emotional_state.title())}",
                    f"Listening Path: {session_profile.get('guidance', '')}",
                ]
            )
            symbolic = session_profile.get("reflection") or engine.get_symbolic_interpretation()
        else:
            analysis_text = engine.get_complete_analysis()
            symbolic = engine.get_symbolic_interpretation()
        return analysis_text, saved_audio_path, fig, waveform_image, symbolic
    except Exception as e:
        error_msg = f"Error during Rhythma generation: {e}"
        LOGGER.exception(error_msg)
        return error_msg, None, None, None, None
def coerce_frequency(value):
    """Coerce *value* to a positive float frequency in Hz.

    Anything that is not a number, cannot be parsed as one, or is not
    strictly positive maps to 0.0 (the "automatic" sentinel).
    """
    try:
        frequency = float(value)
    except (TypeError, ValueError):
        return 0.0
    if frequency > 0:
        return frequency
    return 0.0
def normalize_rhythm_override(value):
    """Map the UI's "Automatic" sentinel (and empty values) to None."""
    sentinels = (None, "", "Automatic")
    return None if value in sentinels else value
# Curated rows for gr.Examples. Column order must match the click handler's
# inputs: [input_text, audio_input, override_freq, override_modulation,
# override_rhythm, duration].
SESSION_EXAMPLES = [
    ["I need something steady before a difficult conversation.", None, None, "sine", "Automatic", 12],
    ["I want to feel grounded and open as the evening slows down.", None, None, "sine", "Automatic", 18],
    ["I need a clear horizon for deep work.", None, None, "sine", "focused", 20],
    ["Everything feels loud and I want a softer landing.", None, None, "sine", "Automatic", 14],
    ["I feel bright and want a livelier pulse without losing calm.", None, None, "pulse", "active", 10],
    ["Give me a long unwind after a heavy day.", None, None, "sine", "relaxed", 30],
    ["I want a gentle session for a low-energy morning.", None, None, "sine", "Automatic", 16],
]
def build_runtime_session_profile(
    analysis_result,
    base_freq=None,
    modulation_type="sine",
    rhythm_pattern=None,
):
    """Merge user overrides into the analysis-provided session profile.

    Returns {} when the analysis carries no profile. When the shared core
    is available, its own override logic is used; otherwise a simple local
    merge applies tone/modulation/pattern overrides on a copy.
    """
    if not isinstance(analysis_result, dict):
        analysis_result = {}
    profile = analysis_result.get("session_profile") or {}
    if not profile:
        return {}
    if symphai_core is not None:
        return symphai_core.apply_profile_overrides(
            profile,
            tone_center=base_freq,
            modulation_type=modulation_type,
            session_pattern=rhythm_pattern,
        )
    merged = dict(profile)
    if base_freq is not None and base_freq > 0:
        merged["tone_center"] = base_freq
    if modulation_type:
        merged["modulation_type"] = modulation_type
    if rhythm_pattern:
        merged["pattern"] = rhythm_pattern
    return merged
def build_session_copy(analysis_result):
    """Derive the display copy (name, tone, guidance, reflection) for the UI.

    Works with any input: non-dict results and missing profile fields all
    fall back to gentle defaults, so the UI always has text to show.
    """
    if not isinstance(analysis_result, dict):
        analysis_result = {}
    profile = analysis_result.get("session_profile") or {}
    mood = analysis_result.get("emotional_state", "neutral")
    tone = profile.get("emotional_tone") or "Measured and receptive"
    guidance = profile.get("guidance") or "Stay with the pulse until your breath settles into its own pace."
    return {
        "session_name": profile.get("title") or f"{mood.title()} Session",
        "emotional_tone": tone,
        "listening_path": f"{tone}. {guidance}",
        "session_reflection": profile.get("reflection") or "This session offers a gentle reset without pushing for intensity.",
        "tone_center": profile.get("tone_center"),
        "session_pattern": profile.get("pattern"),
    }
def rhythma_experience(
    input_text, audio_input,
    override_freq=None,
    override_modulation="sine",
    override_rhythm=None,
    duration=5
):
    """End-to-end handler for the generate button.

    Analyzes the user's input, builds the display copy, renders the audio
    session, and packages everything into the 8-tuple of Gradio outputs.
    """
    text = input_text.strip() if input_text else ""
    frequency = coerce_frequency(override_freq)
    analysis = analyze_input(text, audio_input)
    session_copy = build_session_copy(analysis)
    analysis_text, audio_file, fig, waveform_image, symbolic = generate_modulated_experience(
        analysis,
        base_freq=frequency,
        modulation_type=override_modulation,
        rhythm_pattern=normalize_rhythm_override(override_rhythm),
        duration=duration
    )
    transcription = ""
    if isinstance(analysis, dict):
        transcription = analysis.get("transcription", "")
    listening_path = session_copy["listening_path"]
    session_reflection = session_copy["session_reflection"]
    if audio_file is None and isinstance(analysis_text, str) and analysis_text:
        # Generation failed: surface the error text where the session copy
        # would normally appear.
        fallback_message = analysis_text or symbolic or "Session generation is unavailable right now."
        listening_path = fallback_message
        session_reflection = fallback_message
    return (
        f"### {session_copy['session_name']}",
        session_copy["emotional_tone"],
        listening_path,
        audio_file,
        fig if fig else None,
        waveform_image,
        session_reflection,
        transcription,
    )
# --- Create the Gradio Interface ---
def create_interface():
    """Build and return the Gradio Blocks UI for Rhythma.

    Raises:
        ModuleNotFoundError: when gradio was not importable at module load.
    """
    if gr is None:
        raise ModuleNotFoundError("gradio is required to create the Rhythma interface.")
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="rose", secondary_hue="stone"), title="Rhythma") as demo:
        gr.Markdown("# Rhythma")
        gr.Markdown("### An artful wellness companion for reflective listening.")
        if not use_groq:
            # NOTE(review): gr.Warning is called at build time here, not in an
            # event handler — confirm it surfaces as intended in this Gradio version.
            gr.Warning(
                "Groq analysis is unavailable. Text-led sessions still work, but live voice transcription is off."
            )
        with gr.Row():
            # Left column: user inputs and session shaping controls.
            with gr.Column(scale=1):
                gr.Markdown("**1. Share what you're carrying**")
                input_text = gr.Textbox(
                    label="How are you feeling, or what intention would you like to hold?",
                    placeholder="e.g., 'I need something steady before a conversation' or 'I need room to chill after a long day.'",
                    lines=4
                )
                gr.Markdown("**Optional: add a voice note (requires Groq)**")
                # Voice input is disabled (non-interactive) without a Groq key.
                audio_input = gr.Audio(
                    sources=["microphone"],
                    type="filepath",
                    label="Record or Upload a Voice Note" if use_groq else "Voice Note (Disabled)",
                    interactive=use_groq
                )
                with gr.Accordion("Session shaping controls", open=False):
                    override_freq = gr.Number(
                        value=None,
                        minimum=0,
                        precision=0,
                        label="Tone Center (Hz)",
                        placeholder="Automatic",
                        info="Leave blank to let Rhythma choose a tone center from your session profile."
                    )
                    override_modulation = gr.Dropdown(
                        choices=["sine", "pulse", "chirp"],
                        value="sine",
                        label="Texture Shape"
                    )
                    # Pattern choices come from a throwaway engine instance's
                    # rhythm_configs keys, so the UI stays in sync with the engine.
                    available_patterns = list(RhythmaModulationEngine().rhythm_configs.keys())
                    override_rhythm = gr.Dropdown(
                        choices=["Automatic"] + available_patterns,
                        value="Automatic",
                        label="Session Pattern",
                        info="Leave on Automatic to follow the pattern inferred from your session profile."
                    )
                    duration = gr.Slider(
                        minimum=3, maximum=60, value=10, step=1,
                        label="Session Length (seconds)"
                    )
                generate_button = gr.Button("Begin Exploration", variant="primary", scale=2)
            # Right column: session outputs (copy, audio, visuals).
            with gr.Column(scale=2):
                gr.Markdown("**2. Receive your listening session**")
                gr.Markdown(
                    "_Rhythma shapes a named listening path, then renders the audio, reflection, and waveform around it._"
                )
                session_name_output = gr.Markdown("### Session")
                emotional_tone_output = gr.Markdown("Measured and receptive")
                listening_path_output = gr.Textbox(label="Your Listening Path", lines=6, interactive=False)
                with gr.Row():
                    audio_output = gr.Audio(label="Session Audio", type="filepath", interactive=False)
                    waveform_simple = gr.Image(label="Tone Center", interactive=False, height=100, width=200)
                waveform_plot = gr.Plot(label="Session Pattern")
                symbolic_output = gr.Textbox(label="Session Reflection", interactive=False)
                transcription_output = gr.Textbox(
                    label="Transcribed Voice Note",
                    interactive=False,
                    visible=use_groq
                )
        # Wire the button to the handler; output order must match the
        # 8-tuple returned by rhythma_experience.
        generate_button.click(
            fn=rhythma_experience,
            inputs=[
                input_text, audio_input,
                override_freq, override_modulation, override_rhythm,
                duration
            ],
            outputs=[
                session_name_output, emotional_tone_output, listening_path_output,
                audio_output, waveform_plot, waveform_simple, symbolic_output,
                transcription_output
            ]
        )
        gr.Examples(
            examples=SESSION_EXAMPLES,
            inputs=[input_text, audio_input, override_freq, override_modulation, override_rhythm, duration],
            outputs=[
                session_name_output, emotional_tone_output, listening_path_output,
                audio_output, waveform_plot, waveform_simple, symbolic_output,
                transcription_output
            ],
            fn=rhythma_experience,
            cache_examples=False
        )
        gr.Markdown("---")
        gr.Markdown("""
## About Rhythma
Rhythma is an artful wellness companion that turns a felt state into a reflective listening session.
It uses optional AI analysis, session profiling, and rhythmic sound design to shape a tone center, pattern, and guided path for the moment you are in.
**Note:** Rhythma is for reflective listening and personal wellbeing rituals. It is not medical advice or a clinical treatment.
© 2026 Vers3Dynamics
""")
    return demo
# --- Run the Gradio App ---
if __name__ == "__main__":
    # Guard against partial environments: both the analysis core and gradio
    # must be available before launching the web app.
    if symphai_core is None:
        LOGGER.error("Cannot launch Gradio app because RhythmaSymphAICore failed to initialize.")
    elif gr is None:
        LOGGER.error("Cannot launch Gradio app because gradio is not installed.")
    else:
        app_demo = create_interface()
        app_demo.launch()