Spaces:
Running
Introduce session profiles for Rhythma analysis
Browse files

Add a session-profile layer to the analysis core so the redesign has a stable bridge between emotional input and the richer ambient renderer. This keeps the current text interpretation path intact while giving downstream UI and audio code a consistent session object to build from.
Constraint: Task 1 must stay scoped to analysis and tests only
Constraint: Explicit emotional keywords must keep precedence over focus-intent shortcuts
Rejected: Neutral fallback for unsupported emotions — it created emotional_state/session_profile mismatches
Confidence: medium
Scope-risk: narrow
Reversibility: clean
Directive: Keep session_profile aligned with emotional_state for every supported state before expanding the renderer or UI
Tested: python -m py_compile rhythma_analysis.py tests/test_rhythma_profiles.py
Tested: PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 python -c "import pytest; raise SystemExit(pytest.main(['tests/test_rhythma_profiles.py','-q','-p','no:cacheprovider']))"
Not-tested: Full app runtime or Gradio wiring; deferred to later tasks in the plan
- rhythma_analysis.py +238 -0
- tests/test_rhythma_profiles.py +51 -0
|
@@ -23,12 +23,33 @@ class AnalysisResult:
|
|
| 23 |
emotional_state: str = "neutral"
|
| 24 |
rhythm_pattern: str = "calm"
|
| 25 |
transcription: str = ""
|
|
|
|
| 26 |
error: str | None = None
|
| 27 |
|
| 28 |
def to_dict(self):
|
| 29 |
return asdict(self)
|
| 30 |
|
| 31 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
def _cosine_similarity(left, right):
|
| 33 |
denominator = np.linalg.norm(left) * np.linalg.norm(right)
|
| 34 |
if denominator == 0:
|
|
@@ -36,6 +57,190 @@ def _cosine_similarity(left, right):
|
|
| 36 |
return float(np.dot(left, right) / denominator)
|
| 37 |
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
class RhythmaSymphAICore:
|
| 40 |
"""
|
| 41 |
Interprets text and audio input to determine emotional state and rhythm pattern.
|
|
@@ -150,6 +355,9 @@ class RhythmaSymphAICore:
|
|
| 150 |
if state in words or state in input_text_lower:
|
| 151 |
return state
|
| 152 |
|
|
|
|
|
|
|
|
|
|
| 153 |
self._ensure_embeddings_loaded()
|
| 154 |
if self.embedding_model and self.emotional_embeddings:
|
| 155 |
try:
|
|
@@ -198,6 +406,31 @@ class RhythmaSymphAICore:
|
|
| 198 |
|
| 199 |
return "calm"
|
| 200 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 201 |
def transcribe_audio(self, audio_path):
|
| 202 |
if not self.use_groq or not self.groq_client:
|
| 203 |
return None, "Transcription disabled: Groq client not available or API key missing."
|
|
@@ -249,9 +482,14 @@ class RhythmaSymphAICore:
|
|
| 249 |
input_text=text_to_analyze,
|
| 250 |
emotional_state=result.emotional_state,
|
| 251 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
except Exception as exc:
|
| 253 |
LOGGER.exception("Unexpected error during input analysis.")
|
| 254 |
result = AnalysisResult(
|
|
|
|
| 255 |
transcription=result.transcription,
|
| 256 |
error=f"Unexpected error during input analysis: {exc}",
|
| 257 |
)
|
|
|
|
| 23 |
emotional_state: str = "neutral"
|
| 24 |
rhythm_pattern: str = "calm"
|
| 25 |
transcription: str = ""
|
| 26 |
+
session_profile: dict | None = None
|
| 27 |
error: str | None = None
|
| 28 |
|
| 29 |
def to_dict(self):
|
| 30 |
return asdict(self)
|
| 31 |
|
| 32 |
|
| 33 |
+
@dataclass(frozen=True)
|
| 34 |
+
class SessionProfile:
|
| 35 |
+
key: str
|
| 36 |
+
title: str
|
| 37 |
+
emotional_tone: str
|
| 38 |
+
tone_center: float
|
| 39 |
+
pattern: str
|
| 40 |
+
modulation_type: str
|
| 41 |
+
guidance: str
|
| 42 |
+
reflection: str
|
| 43 |
+
duration_hint: int
|
| 44 |
+
brightness: float
|
| 45 |
+
density: float
|
| 46 |
+
shimmer: float
|
| 47 |
+
breath_rate: float
|
| 48 |
+
|
| 49 |
+
def to_dict(self):
|
| 50 |
+
return asdict(self)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
def _cosine_similarity(left, right):
|
| 54 |
denominator = np.linalg.norm(left) * np.linalg.norm(right)
|
| 55 |
if denominator == 0:
|
|
|
|
| 57 |
return float(np.dot(left, right) / denominator)
|
| 58 |
|
| 59 |
|
| 60 |
+
SESSION_PRESETS = {
|
| 61 |
+
"anxious": SessionProfile(
|
| 62 |
+
key="anxious",
|
| 63 |
+
title="Grounding Tide",
|
| 64 |
+
emotional_tone="Settling and steady",
|
| 65 |
+
tone_center=396.0,
|
| 66 |
+
pattern="calm",
|
| 67 |
+
modulation_type="sine",
|
| 68 |
+
guidance="Let your breath fall behind the pulse until the session feels steady.",
|
| 69 |
+
reflection="This session favors stability over intensity.",
|
| 70 |
+
duration_hint=15,
|
| 71 |
+
brightness=0.25,
|
| 72 |
+
density=0.45,
|
| 73 |
+
shimmer=0.12,
|
| 74 |
+
breath_rate=0.08,
|
| 75 |
+
),
|
| 76 |
+
"stressed": SessionProfile(
|
| 77 |
+
key="stressed",
|
| 78 |
+
title="Soft Landing",
|
| 79 |
+
emotional_tone="Unwinding and spacious",
|
| 80 |
+
tone_center=417.0,
|
| 81 |
+
pattern="relaxed",
|
| 82 |
+
modulation_type="sine",
|
| 83 |
+
guidance="Let the longer exhale soften the edges of the session.",
|
| 84 |
+
reflection="This session eases pressure by widening the pulse.",
|
| 85 |
+
duration_hint=18,
|
| 86 |
+
brightness=0.22,
|
| 87 |
+
density=0.38,
|
| 88 |
+
shimmer=0.1,
|
| 89 |
+
breath_rate=0.07,
|
| 90 |
+
),
|
| 91 |
+
"calm": SessionProfile(
|
| 92 |
+
key="calm",
|
| 93 |
+
title="Quiet Harbor",
|
| 94 |
+
emotional_tone="Easeful and settled",
|
| 95 |
+
tone_center=432.0,
|
| 96 |
+
pattern="calm",
|
| 97 |
+
modulation_type="sine",
|
| 98 |
+
guidance="Rest inside the repeating tone until it feels effortless.",
|
| 99 |
+
reflection="This session keeps motion light to support an even mood.",
|
| 100 |
+
duration_hint=15,
|
| 101 |
+
brightness=0.32,
|
| 102 |
+
density=0.28,
|
| 103 |
+
shimmer=0.11,
|
| 104 |
+
breath_rate=0.09,
|
| 105 |
+
),
|
| 106 |
+
"sad": SessionProfile(
|
| 107 |
+
key="sad",
|
| 108 |
+
title="Low Ember",
|
| 109 |
+
emotional_tone="Tender and reflective",
|
| 110 |
+
tone_center=341.3,
|
| 111 |
+
pattern="relaxed",
|
| 112 |
+
modulation_type="sine",
|
| 113 |
+
guidance="Allow the lower tone to hold the feeling without forcing it to lift.",
|
| 114 |
+
reflection="This session gives weight and warmth to slower emotion.",
|
| 115 |
+
duration_hint=16,
|
| 116 |
+
brightness=0.18,
|
| 117 |
+
density=0.33,
|
| 118 |
+
shimmer=0.08,
|
| 119 |
+
breath_rate=0.07,
|
| 120 |
+
),
|
| 121 |
+
"angry": SessionProfile(
|
| 122 |
+
key="angry",
|
| 123 |
+
title="Ember Release",
|
| 124 |
+
emotional_tone="Directed and discharging",
|
| 125 |
+
tone_center=528.0,
|
| 126 |
+
pattern="active",
|
| 127 |
+
modulation_type="pulse",
|
| 128 |
+
guidance="Track the sharper pulse until it turns from force into direction.",
|
| 129 |
+
reflection="This session channels intensity into movement rather than compression.",
|
| 130 |
+
duration_hint=12,
|
| 131 |
+
brightness=0.5,
|
| 132 |
+
density=0.62,
|
| 133 |
+
shimmer=0.16,
|
| 134 |
+
breath_rate=0.14,
|
| 135 |
+
),
|
| 136 |
+
"fearful": SessionProfile(
|
| 137 |
+
key="fearful",
|
| 138 |
+
title="Shelter Light",
|
| 139 |
+
emotional_tone="Protected and steadying",
|
| 140 |
+
tone_center=384.0,
|
| 141 |
+
pattern="calm",
|
| 142 |
+
modulation_type="sine",
|
| 143 |
+
guidance="Stay with the nearest tone and let it make the room feel smaller and safer.",
|
| 144 |
+
reflection="This session reduces motion so attention can settle close to the body.",
|
| 145 |
+
duration_hint=14,
|
| 146 |
+
brightness=0.24,
|
| 147 |
+
density=0.31,
|
| 148 |
+
shimmer=0.09,
|
| 149 |
+
breath_rate=0.08,
|
| 150 |
+
),
|
| 151 |
+
"confused": SessionProfile(
|
| 152 |
+
key="confused",
|
| 153 |
+
title="North Star",
|
| 154 |
+
emotional_tone="Clarifying and composed",
|
| 155 |
+
tone_center=480.0,
|
| 156 |
+
pattern="focused",
|
| 157 |
+
modulation_type="sine",
|
| 158 |
+
guidance="Follow one repeating detail until the rest of the field begins to organize.",
|
| 159 |
+
reflection="This session simplifies the soundstage to support orientation.",
|
| 160 |
+
duration_hint=14,
|
| 161 |
+
brightness=0.34,
|
| 162 |
+
density=0.3,
|
| 163 |
+
shimmer=0.13,
|
| 164 |
+
breath_rate=0.1,
|
| 165 |
+
),
|
| 166 |
+
"happy": SessionProfile(
|
| 167 |
+
key="happy",
|
| 168 |
+
title="Bright Current",
|
| 169 |
+
emotional_tone="Open and buoyant",
|
| 170 |
+
tone_center=576.0,
|
| 171 |
+
pattern="active",
|
| 172 |
+
modulation_type="pulse",
|
| 173 |
+
guidance="Enjoy the lift in the rhythm without pushing it faster.",
|
| 174 |
+
reflection="This session keeps energy lively while protecting headroom.",
|
| 175 |
+
duration_hint=12,
|
| 176 |
+
brightness=0.56,
|
| 177 |
+
density=0.4,
|
| 178 |
+
shimmer=0.24,
|
| 179 |
+
breath_rate=0.15,
|
| 180 |
+
),
|
| 181 |
+
"focused": SessionProfile(
|
| 182 |
+
key="focused",
|
| 183 |
+
title="Clear Horizon",
|
| 184 |
+
emotional_tone="Attentive and composed",
|
| 185 |
+
tone_center=512.0,
|
| 186 |
+
pattern="focused",
|
| 187 |
+
modulation_type="sine",
|
| 188 |
+
guidance="Stay with one thought and let the pulse keep the edges quiet.",
|
| 189 |
+
reflection="This session narrows motion to support sustained attention.",
|
| 190 |
+
duration_hint=20,
|
| 191 |
+
brightness=0.4,
|
| 192 |
+
density=0.35,
|
| 193 |
+
shimmer=0.18,
|
| 194 |
+
breath_rate=0.12,
|
| 195 |
+
),
|
| 196 |
+
"relaxed": SessionProfile(
|
| 197 |
+
key="relaxed",
|
| 198 |
+
title="Open Meadow",
|
| 199 |
+
emotional_tone="Loose and restorative",
|
| 200 |
+
tone_center=444.0,
|
| 201 |
+
pattern="relaxed",
|
| 202 |
+
modulation_type="sine",
|
| 203 |
+
guidance="Let the slow sway in the session keep your attention unforced.",
|
| 204 |
+
reflection="This session favors softness and lingering resonance.",
|
| 205 |
+
duration_hint=18,
|
| 206 |
+
brightness=0.28,
|
| 207 |
+
density=0.26,
|
| 208 |
+
shimmer=0.12,
|
| 209 |
+
breath_rate=0.08,
|
| 210 |
+
),
|
| 211 |
+
"active": SessionProfile(
|
| 212 |
+
key="active",
|
| 213 |
+
title="Kinetic Bloom",
|
| 214 |
+
emotional_tone="Motivated and rhythmic",
|
| 215 |
+
tone_center=648.0,
|
| 216 |
+
pattern="active",
|
| 217 |
+
modulation_type="pulse",
|
| 218 |
+
guidance="Let the pulse carry forward motion without turning rushed.",
|
| 219 |
+
reflection="This session keeps energy articulated and bright.",
|
| 220 |
+
duration_hint=10,
|
| 221 |
+
brightness=0.6,
|
| 222 |
+
density=0.48,
|
| 223 |
+
shimmer=0.2,
|
| 224 |
+
breath_rate=0.16,
|
| 225 |
+
),
|
| 226 |
+
"neutral": SessionProfile(
|
| 227 |
+
key="neutral",
|
| 228 |
+
title="Still Current",
|
| 229 |
+
emotional_tone="Balanced and open",
|
| 230 |
+
tone_center=432.0,
|
| 231 |
+
pattern="calm",
|
| 232 |
+
modulation_type="sine",
|
| 233 |
+
guidance="Listen for the simplest pulse and let it set the pace.",
|
| 234 |
+
reflection="This session leaves space for your attention to settle naturally.",
|
| 235 |
+
duration_hint=12,
|
| 236 |
+
brightness=0.3,
|
| 237 |
+
density=0.3,
|
| 238 |
+
shimmer=0.1,
|
| 239 |
+
breath_rate=0.1,
|
| 240 |
+
),
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
|
| 244 |
class RhythmaSymphAICore:
|
| 245 |
"""
|
| 246 |
Interprets text and audio input to determine emotional state and rhythm pattern.
|
|
|
|
| 355 |
if state in words or state in input_text_lower:
|
| 356 |
return state
|
| 357 |
|
| 358 |
+
if "focus" in input_text_lower or "deep work" in input_text_lower:
|
| 359 |
+
return "focused"
|
| 360 |
+
|
| 361 |
self._ensure_embeddings_loaded()
|
| 362 |
if self.embedding_model and self.emotional_embeddings:
|
| 363 |
try:
|
|
|
|
| 406 |
|
| 407 |
return "calm"
|
| 408 |
|
| 409 |
+
def build_session_profile(self, emotional_state, rhythm_pattern):
|
| 410 |
+
if emotional_state in SESSION_PRESETS:
|
| 411 |
+
preset = SESSION_PRESETS[emotional_state]
|
| 412 |
+
else:
|
| 413 |
+
preset = SESSION_PRESETS["neutral"]
|
| 414 |
+
profile = preset.to_dict()
|
| 415 |
+
profile["pattern"] = rhythm_pattern or preset.pattern
|
| 416 |
+
return profile
|
| 417 |
+
|
| 418 |
+
def apply_profile_overrides(
|
| 419 |
+
self,
|
| 420 |
+
profile,
|
| 421 |
+
tone_center=None,
|
| 422 |
+
modulation_type=None,
|
| 423 |
+
session_pattern=None,
|
| 424 |
+
):
|
| 425 |
+
shaped_profile = dict(profile)
|
| 426 |
+
if tone_center is not None and tone_center > 0:
|
| 427 |
+
shaped_profile["tone_center"] = tone_center
|
| 428 |
+
if modulation_type:
|
| 429 |
+
shaped_profile["modulation_type"] = modulation_type
|
| 430 |
+
if session_pattern:
|
| 431 |
+
shaped_profile["pattern"] = session_pattern
|
| 432 |
+
return shaped_profile
|
| 433 |
+
|
| 434 |
def transcribe_audio(self, audio_path):
|
| 435 |
if not self.use_groq or not self.groq_client:
|
| 436 |
return None, "Transcription disabled: Groq client not available or API key missing."
|
|
|
|
| 482 |
input_text=text_to_analyze,
|
| 483 |
emotional_state=result.emotional_state,
|
| 484 |
)
|
| 485 |
+
result.session_profile = self.build_session_profile(
|
| 486 |
+
result.emotional_state,
|
| 487 |
+
result.rhythm_pattern,
|
| 488 |
+
)
|
| 489 |
except Exception as exc:
|
| 490 |
LOGGER.exception("Unexpected error during input analysis.")
|
| 491 |
result = AnalysisResult(
|
| 492 |
+
session_profile=self.build_session_profile("neutral", "calm"),
|
| 493 |
transcription=result.transcription,
|
| 494 |
error=f"Unexpected error during input analysis: {exc}",
|
| 495 |
)
|
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from rhythma_analysis import RhythmaSymphAICore
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def test_build_session_profile_for_anxious_text():
|
| 5 |
+
core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)
|
| 6 |
+
|
| 7 |
+
result = core.analyze_input("I feel anxious and need to settle down")
|
| 8 |
+
|
| 9 |
+
assert result["emotional_state"] == "anxious"
|
| 10 |
+
assert result["session_profile"]["title"] == "Grounding Tide"
|
| 11 |
+
assert result["session_profile"]["pattern"] == "calm"
|
| 12 |
+
assert result["session_profile"]["modulation_type"] == "sine"
|
| 13 |
+
assert result["session_profile"]["guidance"].startswith("Let your breath")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_apply_profile_overrides_keeps_session_shape():
|
| 17 |
+
core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)
|
| 18 |
+
|
| 19 |
+
result = core.analyze_input("I want to focus on deep work")
|
| 20 |
+
profile = core.apply_profile_overrides(
|
| 21 |
+
result["session_profile"],
|
| 22 |
+
tone_center=512.0,
|
| 23 |
+
modulation_type="pulse",
|
| 24 |
+
session_pattern="focused",
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
assert profile["tone_center"] == 512.0
|
| 28 |
+
assert profile["modulation_type"] == "pulse"
|
| 29 |
+
assert profile["pattern"] == "focused"
|
| 30 |
+
assert profile["title"] == result["session_profile"]["title"]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_build_session_profile_for_stressed_text():
|
| 34 |
+
core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)
|
| 35 |
+
|
| 36 |
+
result = core.analyze_input("I feel stressed and overloaded")
|
| 37 |
+
|
| 38 |
+
assert result["emotional_state"] == "stressed"
|
| 39 |
+
assert result["session_profile"]["key"] == "stressed"
|
| 40 |
+
assert result["session_profile"]["title"] == "Soft Landing"
|
| 41 |
+
assert result["session_profile"]["pattern"] == "relaxed"
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def test_explicit_emotion_wins_over_focus_heuristic():
|
| 45 |
+
core = RhythmaSymphAICore(use_groq=False, use_embeddings=False)
|
| 46 |
+
|
| 47 |
+
result = core.analyze_input("I feel anxious and need to focus")
|
| 48 |
+
|
| 49 |
+
assert result["emotional_state"] == "anxious"
|
| 50 |
+
assert result["session_profile"]["key"] == "anxious"
|
| 51 |
+
assert result["session_profile"]["title"] == "Grounding Tide"
|