Spaces:
Running
Polish Rhythma runtime and verify the redesigned session flow
Browse files

Finish the redesign by making the session profile drive the actual generation pipeline, locking the full path with an integration-style test, and preserving override behavior for legacy analysis payloads. This turns the earlier design work into a coherent end-to-end session flow instead of a collection of isolated upgrades.
Constraint: Final verification must stay honest about the missing local gradio runtime
Constraint: Legacy analysis payloads without session_profile must still respect explicit overrides
Rejected: Leaving the session profile disconnected from generation | would make the UI and audio disagree about the active session
Confidence: medium
Scope-risk: moderate
Reversibility: clean
Directive: Keep runtime session shaping and UI session copy in sync if later work expands controls or fallbacks
Tested: python -m py_compile app.py rhythma.py rhythma_analysis.py rhythma_engine.py tests/test_rhythma_profiles.py tests/test_rhythma_layered_audio.py tests/test_rhythma_regression.py tests/test_rhythma_ui_copy.py
Tested: PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 python -c "import pytest; raise SystemExit(pytest.main(['tests/test_rhythma_profiles.py','tests/test_rhythma_layered_audio.py','tests/test_rhythma_regression.py','tests/test_rhythma_ui_copy.py','-q','-p','no:cacheprovider']))"
Not-tested: Live Gradio startup in this environment because gradio is not installed locally
- app.py +63 -5
- tests/test_rhythma_ui_copy.py +109 -0
|
@@ -57,16 +57,35 @@ def generate_modulated_experience(analysis_result, base_freq=None, modulation_ty
|
|
| 57 |
return error_msg, None, None, None, None
|
| 58 |
|
| 59 |
emotional_state = analysis_result.get("emotional_state", "neutral")
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
try:
|
| 64 |
engine = RhythmaModulationEngine(
|
| 65 |
base_freq=final_base_freq,
|
| 66 |
-
modulation_type=
|
| 67 |
rhythm_pattern=final_rhythm_pattern,
|
| 68 |
emotional_state=emotional_state if not final_base_freq else None,
|
| 69 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
timestamp = int(time.time())
|
| 72 |
temp_dir = tempfile.gettempdir()
|
|
@@ -79,8 +98,18 @@ def generate_modulated_experience(analysis_result, base_freq=None, modulation_ty
|
|
| 79 |
|
| 80 |
fig = engine.visualize_waveform(duration)
|
| 81 |
waveform_image = engine.get_waveform_image()
|
| 82 |
-
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
|
| 85 |
return analysis_text, saved_audio_path, fig, waveform_image, symbolic
|
| 86 |
|
|
@@ -104,6 +133,35 @@ def normalize_rhythm_override(value):
|
|
| 104 |
return value
|
| 105 |
|
| 106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
def build_session_copy(analysis_result):
|
| 108 |
analysis_result = analysis_result if isinstance(analysis_result, dict) else {}
|
| 109 |
profile = analysis_result.get("session_profile") or {}
|
|
|
|
| 57 |
return error_msg, None, None, None, None
|
| 58 |
|
| 59 |
emotional_state = analysis_result.get("emotional_state", "neutral")
|
| 60 |
+
session_profile = build_runtime_session_profile(
|
| 61 |
+
analysis_result,
|
| 62 |
+
base_freq=base_freq,
|
| 63 |
+
modulation_type=modulation_type,
|
| 64 |
+
rhythm_pattern=rhythm_pattern,
|
| 65 |
+
)
|
| 66 |
+
final_rhythm_pattern = (
|
| 67 |
+
session_profile.get("pattern")
|
| 68 |
+
or rhythm_pattern
|
| 69 |
+
or analysis_result.get("rhythm_pattern")
|
| 70 |
+
or "calm"
|
| 71 |
+
)
|
| 72 |
+
final_modulation_type = session_profile.get("modulation_type") or modulation_type
|
| 73 |
+
final_base_freq = session_profile.get("tone_center")
|
| 74 |
+
if not final_base_freq:
|
| 75 |
+
final_base_freq = base_freq if base_freq and base_freq > 0 else None
|
| 76 |
|
| 77 |
try:
|
| 78 |
engine = RhythmaModulationEngine(
|
| 79 |
base_freq=final_base_freq,
|
| 80 |
+
modulation_type=final_modulation_type,
|
| 81 |
rhythm_pattern=final_rhythm_pattern,
|
| 82 |
emotional_state=emotional_state if not final_base_freq else None,
|
| 83 |
)
|
| 84 |
+
if session_profile:
|
| 85 |
+
engine.generate_modulated_wave = (
|
| 86 |
+
lambda requested_duration, _engine=engine, _profile=dict(session_profile):
|
| 87 |
+
_engine.render_session(_profile, requested_duration)
|
| 88 |
+
)
|
| 89 |
|
| 90 |
timestamp = int(time.time())
|
| 91 |
temp_dir = tempfile.gettempdir()
|
|
|
|
| 98 |
|
| 99 |
fig = engine.visualize_waveform(duration)
|
| 100 |
waveform_image = engine.get_waveform_image()
|
| 101 |
+
if session_profile:
|
| 102 |
+
analysis_text = "\n\n".join(
|
| 103 |
+
[
|
| 104 |
+
f"Session: {session_profile.get('title', 'Session')}",
|
| 105 |
+
f"Tone: {session_profile.get('emotional_tone', emotional_state.title())}",
|
| 106 |
+
f"Listening Path: {session_profile.get('guidance', '')}",
|
| 107 |
+
]
|
| 108 |
+
)
|
| 109 |
+
symbolic = session_profile.get("reflection") or engine.get_symbolic_interpretation()
|
| 110 |
+
else:
|
| 111 |
+
analysis_text = engine.get_complete_analysis()
|
| 112 |
+
symbolic = engine.get_symbolic_interpretation()
|
| 113 |
|
| 114 |
return analysis_text, saved_audio_path, fig, waveform_image, symbolic
|
| 115 |
|
|
|
|
| 133 |
return value
|
| 134 |
|
| 135 |
|
| 136 |
+
def build_runtime_session_profile(
|
| 137 |
+
analysis_result,
|
| 138 |
+
base_freq=None,
|
| 139 |
+
modulation_type="sine",
|
| 140 |
+
rhythm_pattern=None,
|
| 141 |
+
):
|
| 142 |
+
analysis_result = analysis_result if isinstance(analysis_result, dict) else {}
|
| 143 |
+
profile = analysis_result.get("session_profile") or {}
|
| 144 |
+
if not profile:
|
| 145 |
+
return {}
|
| 146 |
+
|
| 147 |
+
if symphai_core is not None:
|
| 148 |
+
return symphai_core.apply_profile_overrides(
|
| 149 |
+
profile,
|
| 150 |
+
tone_center=base_freq,
|
| 151 |
+
modulation_type=modulation_type,
|
| 152 |
+
session_pattern=rhythm_pattern,
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
shaped_profile = dict(profile)
|
| 156 |
+
if base_freq is not None and base_freq > 0:
|
| 157 |
+
shaped_profile["tone_center"] = base_freq
|
| 158 |
+
if modulation_type:
|
| 159 |
+
shaped_profile["modulation_type"] = modulation_type
|
| 160 |
+
if rhythm_pattern:
|
| 161 |
+
shaped_profile["pattern"] = rhythm_pattern
|
| 162 |
+
return shaped_profile
|
| 163 |
+
|
| 164 |
+
|
| 165 |
def build_session_copy(analysis_result):
|
| 166 |
analysis_result = analysis_result if isinstance(analysis_result, dict) else {}
|
| 167 |
profile = analysis_result.get("session_profile") or {}
|
|
@@ -127,3 +127,112 @@ def test_rhythma_experience_degrades_copy_consistently_on_generation_failure(mon
|
|
| 127 |
|
| 128 |
assert outputs[2] == "Generation unavailable"
|
| 129 |
assert outputs[6] == "Generation unavailable"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
assert outputs[2] == "Generation unavailable"
|
| 129 |
assert outputs[6] == "Generation unavailable"
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def test_rhythma_experience_uses_real_session_profile_across_pipeline(monkeypatch):
|
| 133 |
+
app = import_app_with_gradio_stub()
|
| 134 |
+
captured = {}
|
| 135 |
+
|
| 136 |
+
class FakeEngine:
|
| 137 |
+
def __init__(self, **kwargs):
|
| 138 |
+
captured["init_kwargs"] = kwargs
|
| 139 |
+
self.generated_audio = None
|
| 140 |
+
|
| 141 |
+
def render_session(self, profile, duration):
|
| 142 |
+
captured["render_profile"] = profile
|
| 143 |
+
captured["render_duration"] = duration
|
| 144 |
+
return "layered-session-audio"
|
| 145 |
+
|
| 146 |
+
def generate_modulated_wave(self, duration):
|
| 147 |
+
captured["generated_duration"] = duration
|
| 148 |
+
return "legacy-generated-audio"
|
| 149 |
+
|
| 150 |
+
def save_audio(self, duration, file_path):
|
| 151 |
+
captured["save_duration"] = duration
|
| 152 |
+
captured["save_path"] = file_path
|
| 153 |
+
self.generated_audio = self.generate_modulated_wave(duration)
|
| 154 |
+
return file_path
|
| 155 |
+
|
| 156 |
+
def visualize_waveform(self, duration):
|
| 157 |
+
captured["visualize_duration"] = duration
|
| 158 |
+
return "plot"
|
| 159 |
+
|
| 160 |
+
def get_waveform_image(self):
|
| 161 |
+
return "image"
|
| 162 |
+
|
| 163 |
+
def get_complete_analysis(self):
|
| 164 |
+
return "legacy analysis"
|
| 165 |
+
|
| 166 |
+
def get_symbolic_interpretation(self):
|
| 167 |
+
return "legacy symbolic"
|
| 168 |
+
|
| 169 |
+
monkeypatch.setattr(app, "RhythmaModulationEngine", FakeEngine)
|
| 170 |
+
|
| 171 |
+
outputs = app.rhythma_experience(
|
| 172 |
+
"I want to focus on deep work",
|
| 173 |
+
None,
|
| 174 |
+
override_freq=0,
|
| 175 |
+
override_modulation="sine",
|
| 176 |
+
override_rhythm="Automatic",
|
| 177 |
+
duration=5,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
assert outputs[0] == "### Clear Horizon"
|
| 181 |
+
assert outputs[1] == "Attentive and composed"
|
| 182 |
+
assert outputs[2].startswith("Attentive and composed")
|
| 183 |
+
assert outputs[6] == "This session narrows motion to support sustained attention."
|
| 184 |
+
assert outputs[7] == ""
|
| 185 |
+
assert captured["init_kwargs"] == {
|
| 186 |
+
"base_freq": 512.0,
|
| 187 |
+
"modulation_type": "sine",
|
| 188 |
+
"rhythm_pattern": "focused",
|
| 189 |
+
"emotional_state": None,
|
| 190 |
+
}
|
| 191 |
+
assert captured["render_profile"]["title"] == "Clear Horizon"
|
| 192 |
+
assert captured["render_profile"]["tone_center"] == 512.0
|
| 193 |
+
assert captured["render_profile"]["pattern"] == "focused"
|
| 194 |
+
assert captured["render_duration"] == 5
|
| 195 |
+
assert captured["save_duration"] == 5
|
| 196 |
+
assert captured["visualize_duration"] == 5
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def test_generate_modulated_experience_prefers_explicit_rhythm_override_without_session_profile(monkeypatch):
|
| 200 |
+
app = import_app_with_gradio_stub()
|
| 201 |
+
captured = {}
|
| 202 |
+
|
| 203 |
+
class FakeEngine:
|
| 204 |
+
def __init__(self, **kwargs):
|
| 205 |
+
captured["init_kwargs"] = kwargs
|
| 206 |
+
|
| 207 |
+
def save_audio(self, duration, file_path):
|
| 208 |
+
return file_path
|
| 209 |
+
|
| 210 |
+
def visualize_waveform(self, duration):
|
| 211 |
+
return "plot"
|
| 212 |
+
|
| 213 |
+
def get_waveform_image(self):
|
| 214 |
+
return "image"
|
| 215 |
+
|
| 216 |
+
def get_complete_analysis(self):
|
| 217 |
+
return "legacy analysis"
|
| 218 |
+
|
| 219 |
+
def get_symbolic_interpretation(self):
|
| 220 |
+
return "legacy symbolic"
|
| 221 |
+
|
| 222 |
+
monkeypatch.setattr(app, "RhythmaModulationEngine", FakeEngine)
|
| 223 |
+
|
| 224 |
+
analysis = {
|
| 225 |
+
"emotional_state": "neutral",
|
| 226 |
+
"rhythm_pattern": "calm",
|
| 227 |
+
"transcription": "",
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
result = app.generate_modulated_experience(
|
| 231 |
+
analysis,
|
| 232 |
+
modulation_type="sine",
|
| 233 |
+
rhythm_pattern="focused",
|
| 234 |
+
duration=5,
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
assert result[0] == "legacy analysis"
|
| 238 |
+
assert captured["init_kwargs"]["rhythm_pattern"] == "focused"
|