ciaochris committed on
Commit 6dec25a · verified · 1 Parent(s): ca1f8d1

Update rhythma.py

Files changed (1):
  1. rhythma.py +236 -18
rhythma.py CHANGED
@@ -2,28 +2,79 @@ import numpy as np
 import matplotlib.pyplot as plt
 from scipy import signal
 import pandas as pd
+from PIL import Image
+import io
+from sklearn.metrics.pairwise import cosine_similarity
+import soundfile as sf
 
 class RhythmaModulationEngine:
     """
     Rhythma: The Living Modulation Engine
     A dynamic rhythm-based audio modulation system that creates responsive
-    sound experiences based on rhythm patterns.
+    sound experiences based on rhythm patterns and emotional states.
     """
 
-    def __init__(self, base_freq, modulation_type, rhythm_pattern):
+    def __init__(self, base_freq=None, modulation_type="sine", rhythm_pattern=None, emotional_state=None):
         """
         Initialize the RhythmaModulationEngine.
 
         Args:
-            base_freq (float): The base frequency in Hz
+            base_freq (float, optional): The base frequency in Hz
             modulation_type (str): Type of modulation (sine, pulse, chirp)
-            rhythm_pattern (str): Pattern type (calm, active, focused, relaxed)
+            rhythm_pattern (str, optional): Pattern type (calm, active, focused, relaxed)
+            emotional_state (str, optional): Emotional state (anxious, stressed, calm, etc.)
         """
-        self.base_freq = base_freq
         self.modulation_type = modulation_type
-        self.rhythm_pattern = rhythm_pattern
         self.sample_rate = 44100  # Standard audio sample rate
 
+        # Define frequency mappings for emotional states
+        self.emotional_frequencies = {
+            "anxious": 396,
+            "stressed": 528,
+            "calm": 741,
+            "sad": 417,
+            "angry": 852,
+            "fearful": 639,
+            "confused": 285,
+            "happy": 432
+        }
+
+        # Detailed information about emotional states
+        self.emotional_info = {
+            "anxious": {
+                "name": "Liberating Guilt and Fear",
+                "advice": "The 396 Hz frequency is associated with releasing fear and guilt."
+            },
+            "stressed": {
+                "name": "Transformation and Miracles",
+                "advice": "The 528 Hz frequency is known as the 'miracle tone' for transformation and DNA repair."
+            },
+            "calm": {
+                "name": "Awakening Intuition",
+                "advice": "The 741 Hz frequency is associated with awakening intuition and solving problems."
+            },
+            "sad": {
+                "name": "Facilitating Change",
+                "advice": "The 417 Hz frequency is believed to facilitate change and let go of negative energy."
+            },
+            "angry": {
+                "name": "Returning to Spiritual Order",
+                "advice": "The 852 Hz frequency is associated with returning to spiritual order and inner strength."
+            },
+            "fearful": {
+                "name": "Connecting Relationships",
+                "advice": "The 639 Hz frequency is linked to connecting relationships and understanding."
+            },
+            "confused": {
+                "name": "Quantum Cognition",
+                "advice": "The 285 Hz frequency is believed to influence energy fields and aid in healing."
+            },
+            "happy": {
+                "name": "Harmonizing Vibrations",
+                "advice": "The 432 Hz frequency is associated with harmonizing vibrations and promoting wellbeing."
+            }
+        }
+
         # Configure rhythm patterns
         self.rhythm_configs = {
             "calm": {
 
@@ -56,19 +107,30 @@ class RhythmaModulationEngine:
             }
         }
 
-        # Get current rhythm config
-        self.config = self.rhythm_configs.get(
-            rhythm_pattern,
-            self.rhythm_configs["calm"]  # Default to calm if pattern not found
-        )
-
-        # Symbolic mapping (for future use in SymphAI core)
+        # Symbolic mapping for rhythm patterns
         self.symbolic_mapping = {
             "calm": "Resonating in the Circle Archetype: completion, wholeness, presence",
             "active": "Resonating in the Spiral Archetype: flow, transition, emergence",
             "focused": "Resonating in the Triangle Archetype: clarity, direction, purpose",
             "relaxed": "Resonating in the Wave Archetype: fluidity, acceptance, surrender"
         }
+
+        # Set the base frequency based on emotional state if provided, otherwise use base_freq
+        if emotional_state and emotional_state in self.emotional_frequencies:
+            self.emotional_state = emotional_state
+            self.base_freq = self.emotional_frequencies[emotional_state]
+        else:
+            self.emotional_state = None
+            self.base_freq = base_freq or 440  # Default to A4 if no frequency or emotion provided
+
+        # Set rhythm pattern
+        self.rhythm_pattern = rhythm_pattern or "calm"  # Default to calm if not provided
+
+        # Get current rhythm config
+        self.config = self.rhythm_configs.get(
+            self.rhythm_pattern,
+            self.rhythm_configs["calm"]  # Default to calm if pattern not found
+        )
 
     def _generate_base_wave(self, duration):
         """Generate the base carrier wave"""
 
@@ -155,6 +217,13 @@
         normalized = 0.8 * enriched / np.max(np.abs(enriched))
 
         return normalized
+
+    def save_audio(self, duration, file_path=None):
+        """Generate and save audio to a file"""
+        audio = self.generate_modulated_wave(duration)
+        file_path = file_path or f"rhythma_{self.base_freq}Hz_{self.rhythm_pattern}.wav"
+        sf.write(file_path, audio, self.sample_rate)
+        return file_path
 
     def visualize_waveform(self, duration):
         """
 
@@ -177,7 +246,10 @@
 
         # Plot time domain
         ax1.plot(t[:1000], modulated[:1000])
-        ax1.set_title(f'Rhythma Modulated Waveform: {self.rhythm_pattern} ({self.modulation_type})')
+        title = f'Rhythma Modulated Waveform: {self.rhythm_pattern} ({self.modulation_type})'
+        if self.emotional_state:
+            title += f' - {self.emotional_state.capitalize()} state'
+        ax1.set_title(title)
         ax1.set_xlabel('Time (s)')
         ax1.set_ylabel('Amplitude')
 
 
@@ -186,16 +258,162 @@
         ax2.pcolormesh(t, f[:500], Sxx[:500], shading='gouraud')
         ax2.set_ylabel('Frequency (Hz)')
         ax2.set_xlabel('Time (s)')
-        ax2.set_title('Spectrogram')
+        ax2.set_title(f'Spectrogram - Base Frequency: {self.base_freq} Hz')
 
         plt.tight_layout()
 
         # Add symbolic interpretation
-        symbolic_text = self.symbolic_mapping.get(self.rhythm_pattern, "")
-        fig.text(0.5, 0.01, symbolic_text, ha='center', fontsize=10, style='italic')
+        fig_text = self.get_symbolic_interpretation()
+        if self.emotional_state:
+            emotion_info = self.emotional_info.get(self.emotional_state, {})
+            if emotion_info:
+                fig_text += f"\n{self.base_freq} Hz - {emotion_info.get('name', '')}"
+
+        fig.text(0.5, 0.01, fig_text, ha='center', fontsize=10, style='italic')
 
         return fig
+
+    def get_waveform_image(self):
+        """Generate waveform as a PIL Image"""
+        duration = 0.1  # Short duration for visualization
+        t = np.linspace(0, duration, int(self.sample_rate * duration), False)
+        tone = np.sin(2 * np.pi * self.base_freq * t)
+
+        plt.figure(figsize=(10, 4))
+        plt.plot(t, tone)
+        plt.title(f"Waveform of {self.base_freq} Hz Tone")
+        plt.xlabel("Time (s)")
+        plt.ylabel("Amplitude")
+        plt.ylim(-1.1, 1.1)
+        plt.grid(True)
+
+        buf = io.BytesIO()
+        plt.savefig(buf, format='png')
+        buf.seek(0)
+        plt.close()
+
+        return Image.open(buf)
 
     def get_symbolic_interpretation(self):
         """Return the symbolic interpretation of the current rhythm pattern"""
-        return self.symbolic_mapping.get(self.rhythm_pattern, "Unknown pattern")
+        return self.symbolic_mapping.get(self.rhythm_pattern, "Unknown pattern")
+
+    def get_emotional_advice(self):
+        """Get advice based on emotional state if available"""
+        if not self.emotional_state:
+            return ""
+
+        emotion_info = self.emotional_info.get(self.emotional_state, {})
+        if not emotion_info:
+            return ""
+
+        return f"{emotion_info.get('advice', '')}"
+
+    def get_complete_analysis(self):
+        """Get a complete analysis including emotional and rhythmic information"""
+        analysis = []
+
+        if self.emotional_state:
+            emotion_info = self.emotional_info.get(self.emotional_state, {})
+            if emotion_info:
+                analysis.append(f"Emotional State: {self.emotional_state.capitalize()}")
+                analysis.append(f"Resonant Frequency: {self.base_freq} Hz - {emotion_info.get('name', '')}")
+                analysis.append(f"Emotional Advice: {emotion_info.get('advice', '')}")
+        else:
+            analysis.append(f"Base Frequency: {self.base_freq} Hz")
+
+        analysis.append(f"Rhythm Pattern: {self.rhythm_pattern.capitalize()}")
+        analysis.append(f"Symbolic Interpretation: {self.get_symbolic_interpretation()}")
+        analysis.append(f"Modulation Type: {self.modulation_type.capitalize()}")
+
+        return "\n\n".join(analysis)
+
+
+class RhythmaSymphAICore:
+    """
+    SymphAI Core - The intelligent symbolic engine that interprets rhythm and state
+    """
+
+    def __init__(self, rhythm_analyzer=None, pattern_matcher=None):
+        """Initialize the SymphAI Core"""
+        # Default emotional states that can be detected
+        self.emotional_states = [
+            "anxious", "stressed", "calm", "sad",
+            "angry", "fearful", "confused", "happy"
+        ]
+
+        # Default rhythm patterns
+        self.rhythm_patterns = [
+            "calm", "active", "focused", "relaxed"
+        ]
+
+        # Create embeddings for rhythm states if using semantic matching
+        try:
+            from sentence_transformers import SentenceTransformer
+            self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
+            self.emotional_embeddings = {
+                emotion: self.embedding_model.encode([emotion])
+                for emotion in self.emotional_states
+            }
+            self.rhythm_embeddings = {
+                pattern: self.embedding_model.encode([pattern])
+                for pattern in self.rhythm_patterns
+            }
+        except ImportError:
+            self.embedding_model = None
+            self.emotional_embeddings = {}
+            self.rhythm_embeddings = {}
+            print("SentenceTransformer not installed. Semantic matching disabled.")
+
+    def get_closest_emotional_state(self, input_text):
+        """Map input text to the closest emotional state"""
+        if not self.embedding_model:
+            # Fallback to simple word matching if no embedding model
+            for emotion in self.emotional_states:
+                if emotion in input_text.lower():
+                    return emotion
+            return "calm"  # Default
+
+        # Use semantic similarity to find the closest emotion
+        input_embedding = self.embedding_model.encode([input_text])
+        similarities = {
+            emotion: cosine_similarity(input_embedding, embedding)[0][0]
+            for emotion, embedding in self.emotional_embeddings.items()
+        }
+        return max(similarities, key=similarities.get)
+
+    def get_closest_rhythm_pattern(self, input_text):
+        """Map input text to the closest rhythm pattern"""
+        if not self.embedding_model:
+            # Fallback to simple mapping based on emotional state
+            emotional_state = self.get_closest_emotional_state(input_text)
+            # Map emotional states to rhythm patterns
+            mapping = {
+                "anxious": "active",
+                "stressed": "active",
+                "calm": "calm",
+                "sad": "relaxed",
+                "angry": "active",
+                "fearful": "active",
+                "confused": "focused",
+                "happy": "calm"
+            }
+            return mapping.get(emotional_state, "calm")
+
+        # Use semantic similarity to find the closest rhythm pattern
+        input_embedding = self.embedding_model.encode([input_text])
+        similarities = {
+            pattern: cosine_similarity(input_embedding, embedding)[0][0]
+            for pattern, embedding in self.rhythm_embeddings.items()
+        }
+        return max(similarities, key=similarities.get)
+
+    def analyze_input(self, input_text):
+        """Analyze input text and return appropriate emotional state and rhythm pattern"""
+        emotional_state = self.get_closest_emotional_state(input_text)
+        rhythm_pattern = self.get_closest_rhythm_pattern(input_text)
+
+        return {
+            "emotional_state": emotional_state,
+            "rhythm_pattern": rhythm_pattern
+        }
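The commit adds RhythmaSymphAICore but never wires it to the engine; a hedged end-to-end sketch of the presumably intended flow, from free text to analysis and audio:

core = RhythmaSymphAICore()
result = core.analyze_input("I feel stressed and overwhelmed by work")
# With the word-match fallback (no sentence-transformers installed) this yields:
# {"emotional_state": "stressed", "rhythm_pattern": "active"}

engine = RhythmaModulationEngine(
    emotional_state=result["emotional_state"],
    rhythm_pattern=result["rhythm_pattern"],
)
print(engine.get_complete_analysis())
engine.save_audio(duration=10.0)  # writes rhythma_528Hz_active.wav

Note that the word-match fallback returns the first emotion from emotional_states found in the text, so inputs naming several emotions resolve in list order.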