ciaochris committed on
Commit 2f0b9e7 · verified · 1 Parent(s): 20c1d0a

Update app.py

Files changed (1)
  1. app.py +22 -17
app.py CHANGED
@@ -1,4 +1,4 @@
-# main.py
+# app.py
 import os
 import tempfile
 import base64
@@ -10,10 +10,16 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
 import uvicorn

+# Hugging Face Spaces specific setup
+os.environ["HF_HOME"] = "/tmp/hf_cache"
+os.environ["SENTENCE_TRANSFORMERS_HOME"] = "/tmp/hf_cache"
+os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
+
 from rhythma import RhythmaSymphAICore, RhythmaModulationEngine

-app = FastAPI(title="Rhythma API")
+app = FastAPI(title="Rhythma: The Living Modulation Engine")

+# Enable CORS for the frontend
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
@@ -22,9 +28,10 @@ app.add_middleware(
     allow_headers=["*"],
 )

-# Serve the beautiful frontend
+# Mount static files so index.html is accessible
 app.mount("/static", StaticFiles(directory="."), name="static")

+# Initialize the core components
 symphai = RhythmaSymphAICore(use_groq=True)

 @app.post("/generate")
@@ -38,16 +45,19 @@ async def generate(
 ):
     audio_path = None
     if audio and audio.filename:
+        # Save uploaded audio to temp file
         with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
             tmp.write(await audio.read())
             audio_path = tmp.name

     try:
+        # Step 1: Analyze input using your SymphAI Core
         analysis = symphai.analyze_input(
             input_text.strip() or None,
             audio_path
         )

+        # Step 2: Generate modulated experience
         engine = RhythmaModulationEngine(
             base_freq=override_freq if override_freq > 0 else None,
             modulation_type=override_modulation,
@@ -55,30 +65,29 @@
             emotional_state=analysis.get("emotional_state")
         )

-        # Audio
+        # Generate audio file
         timestamp = int(os.times()[4] * 1000)
         audio_file = f"rhythma_{timestamp}.wav"
         saved_audio_path = engine.save_audio(duration, audio_file)

-        # Waveform image (PIL)
+        # Generate visualizations
         waveform_pil = engine.get_waveform_image()
-
-        # Full plot
         fig = engine.visualize_waveform(duration)

-        # Convert plot to base64
+        # Convert matplotlib figure to base64
         buf = io.BytesIO()
         fig.savefig(buf, format="png", bbox_inches="tight", dpi=220)
         buf.seek(0)
         plot_base64 = base64.b64encode(buf.read()).decode("utf-8")
         plt.close(fig)

-        # Convert simple waveform to base64
+        # Convert simple waveform PIL image to base64
         buf = io.BytesIO()
         waveform_pil.save(buf, format="PNG")
         buf.seek(0)
         simple_wave_base64 = base64.b64encode(buf.read()).decode("utf-8")

+        # Return all data to the frontend
         return {
             "analysis_text": engine.get_complete_analysis(),
             "audio_base64": base64.b64encode(open(saved_audio_path, "rb").read()).decode("utf-8"),
@@ -96,15 +105,11 @@ async def generate(
         return JSONResponse(status_code=500, content={"error": str(e)})

     finally:
+        # Clean up temporary audio file
         if audio_path and os.path.exists(audio_path):
             os.unlink(audio_path)

+
+# For local testing only (Hugging Face uses the Dockerfile CMD)
 if __name__ == "__main__":
-    print("🚀 Rhythma is live → http://localhost:8000/static/index.html")
-    print(" (Press CTRL+C to stop)")
-    uvicorn.run(
-        "app:app",  # ← This was the problem (was "main:app")
-        host="0.0.0.0",
-        port=8000,
-        reload=True
-    )
+    uvicorn.run("app:app", host="0.0.0.0", port=7860)
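
For quick local verification of this change, a minimal client sketch follows. It assumes the form field names match those visible in the handler body of generate() in the diff (input_text, override_freq, override_modulation, duration, plus the optional audio upload), that the response includes the analysis_text and audio_base64 keys returned above, and that the app is listening on port 7860 as in the new uvicorn.run call; all field values are illustrative only.

# client_example.py: hypothetical smoke test for the /generate endpoint
import base64
import requests

resp = requests.post(
    "http://localhost:7860/generate",
    data={
        "input_text": "calm focus for deep work",  # free-form text to analyze
        "override_freq": 0,                        # 0 lets the engine pick base_freq (per the diff's > 0 check)
        "override_modulation": "",                 # illustrative; accepted values are not shown in the diff
        "duration": 30,                            # length of generated audio (seconds assumed)
    },
    # files={"audio": open("sample.wav", "rb")},   # optional audio upload, saved to a temp .wav server-side
    timeout=300,
)
resp.raise_for_status()
payload = resp.json()

print(payload["analysis_text"])

# The endpoint returns the generated WAV as base64; decode and save it.
with open("rhythma_output.wav", "wb") as f:
    f.write(base64.b64decode(payload["audio_base64"]))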