Falln87 committed on
Commit
d1413a2
Β·
verified Β·
1 Parent(s): 417364c

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +346 -34
src/streamlit_app.py CHANGED
@@ -1,40 +1,352 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
- In the meantime, below is an example of what you can do with just a few lines of code:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
1
+ # FallnAI AgentBuilder Pro v2.0
 
 
2
  import streamlit as st
3
+ import sys
4
+ import io
5
+ import re
6
+ import json
7
+ import contextlib
8
+ from typing import List, Optional, Dict, Any
9
+ from pydantic import BaseModel, Field
10
+ from crewai import Agent, Task, Crew, Process, LLM
11
 
12
+ # --- CONFIGURATION SCHEMAS ---
13
+
14
class AgentSchema(BaseModel):
    """Validated definition of a single agent as entered in the UI."""
    # Short job title; min_length=2 rejects empty/one-char roles.
    role: str = Field(..., min_length=2)
    # Objective text handed to crewai.Agent.
    goal: str = Field(..., min_length=5)
    # Persona/context string used to steer the LLM.
    backstory: str = Field(..., min_length=5)
    # When True the agent may hand work off to other agents.
    allow_delegation: bool = False
    # Verbose agent logging (surfaced through the stdout redirect).
    verbose: bool = True
+
21
class TaskSchema(BaseModel):
    """Validated definition of a single task.

    ``agent_index`` is a positional reference into the crew's agent list.
    It must be non-negative; bounds against the actual list length can only
    be checked at build time, since agents and tasks validate separately.
    """
    description: str = Field(..., min_length=5)
    expected_output: str = Field(..., min_length=5)
    # ge=0 rejects negative indices, which would otherwise silently wrap
    # around to the wrong agent via Python's negative list indexing.
    agent_index: int = Field(..., ge=0)
+
26
class CrewConfigSchema(BaseModel):
    """Top-level crew configuration assembled from the UI state."""
    agents: List[AgentSchema]
    tasks: List[TaskSchema]
    # "sequential" or "hierarchical"; any other value falls back to
    # sequential in CrewProcessor.build_and_run.
    process_type: str = "sequential"
    # CrewAI memory requires an embedding backend, hence off by default.
    memory: bool = False
    cache: bool = True
+
33
+ # --- LOGGING INTERFACE ---
34
+
35
class StreamlitRedirect(io.StringIO):
    """Stream that mirrors everything written to it into a Streamlit container.

    The accumulated text is re-rendered as a code block on every write so the
    UI shows a live, growing log.
    """

    def __init__(self, container):
        super().__init__()
        self.container = container
        self.text = ""

    def write(self, data):
        # Strip ANSI escape sequences so the UI log stays readable.
        stripped = re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', data)
        if not stripped:
            # Nothing visible survived the strip; report bytes consumed anyway.
            return len(data)
        self.text += stripped
        self.container.code(self.text, language='text')
        return len(data)
+
50
@contextlib.contextmanager
def st_capture_stdout(container):
    """Temporarily route ``sys.stdout`` into a Streamlit code block.

    Yields the redirecting stream; the previous stdout is always restored
    on exit, even if the body raises.
    """
    stream = StreamlitRedirect(container)
    previous, sys.stdout = sys.stdout, stream
    try:
        yield stream
    finally:
        sys.stdout = previous
+
61
+ # --- THE ENGINE LAYER ---
62
 
63
class CrewProcessor:
    """Transforms validated UI data into runnable CrewAI objects using Hugging Face."""

    @staticmethod
    def build_and_run(config: CrewConfigSchema, hf_token: str, model_id: str, temperature: float) -> str:
        """Build agents and tasks from ``config``, run the crew, return its output.

        Raises:
            ValueError: if a task's ``agent_index`` does not refer to a defined agent.
        """
        # Initialize LLM using the Hugging Face provider
        custom_llm = LLM(
            model=f"huggingface/{model_id}",
            api_key=hf_token,
            temperature=temperature
        )

        # 1. Build Agents
        agents = []
        for a_conf in config.agents:
            agents.append(Agent(
                role=a_conf.role,
                goal=a_conf.goal,
                backstory=a_conf.backstory,
                allow_delegation=a_conf.allow_delegation,
                verbose=a_conf.verbose,
                llm=custom_llm
            ))

        # 2. Build Tasks — validate the cross-reference first so a stale or
        # negative index fails loudly instead of wrapping around (Python's
        # negative indexing) or raising a bare IndexError.
        tasks = []
        for t_conf in config.tasks:
            if not 0 <= t_conf.agent_index < len(agents):
                raise ValueError(
                    f"Task references agent index {t_conf.agent_index}, "
                    f"but only {len(agents)} agent(s) are defined."
                )
            assigned_agent = agents[t_conf.agent_index]
            tasks.append(Task(
                description=t_conf.description,
                expected_output=t_conf.expected_output,
                agent=assigned_agent
            ))

        # 3. Formulate Crew
        process_map = {
            "sequential": Process.sequential,
            "hierarchical": Process.hierarchical
        }

        crew = Crew(
            agents=agents,
            tasks=tasks,
            process=process_map.get(config.process_type, Process.sequential),
            # Hierarchical crews need a manager model to coordinate delegation.
            manager_llm=custom_llm if config.process_type == "hierarchical" else None,
            verbose=True,
            memory=config.memory,
            cache=config.cache
        )

        # Kickoff returns a CrewOutput object; .raw contains the string result
        result = crew.kickoff()
        return str(result.raw) if hasattr(result, 'raw') else str(result)
116
 
117
+ # --- CODE GENERATOR ---
118
+
119
def generate_standalone_script(config_dict: dict, model_id: str, temperature: float, process_type: str) -> str:
    """Generates a standalone Python script for CLI execution.

    All user-supplied text (roles, goals, backstories, descriptions, ...) is
    embedded via ``repr()`` (``!r``) so quotes, backslashes, newlines, or
    triple-quote sequences typed in the UI cannot break the generated
    script's syntax.

    Returns the script as a single string; it is never executed here.
    """
    script = f"""# Auto-generated FallnAI CrewAI Script
import os
from crewai import Agent, Task, Crew, Process, LLM

# Ensure your HF_TOKEN is set in your environment variables:
# export HF_TOKEN="your_token_here"

def main():
    llm = LLM(
        model="huggingface/{model_id}",
        api_key=os.environ.get("HF_TOKEN"),
        temperature={temperature}
    )
"""

    script += "\n    # --- AGENTS ---\n"
    for i, a in enumerate(config_dict['agents']):
        script += f"""    agent_{i} = Agent(
        role={a['role']!r},
        goal={a['goal']!r},
        backstory={a['backstory']!r},
        allow_delegation={a['allow_delegation']},
        verbose=True,
        llm=llm
    )\n"""

    script += "\n    # --- TASKS ---\n"
    for i, t in enumerate(config_dict['tasks']):
        script += f"""    task_{i} = Task(
        description={t['description']!r},
        expected_output={t['expected_output']!r},
        agent=agent_{t['agent_index']}
    )\n"""

    process_enum = "Process.sequential" if process_type == "sequential" else "Process.hierarchical"

    script += f"""
    # --- CREW ---
    crew = Crew(
        agents=[{', '.join(f'agent_{i}' for i in range(len(config_dict['agents'])))}],
        tasks=[{', '.join(f'task_{i}' for i in range(len(config_dict['tasks'])))}],
        process={process_enum},
        manager_llm=llm if {process_type!r} == "hierarchical" else None,
        verbose=True
    )

    print("🚀 Kicking off the crew...")
    result = crew.kickoff()
    print("\\n### FINAL RESULT ###\\n")
    print(getattr(result, 'raw', result))

if __name__ == "__main__":
    main()
"""
    return script
176
+
177
+ # --- THE UI LAYER ---
178
+
179
def init_session_state():
    """Populate session state with a starter agent and task on first load."""
    starter_agent = {"role": "Researcher", "goal": "Find latest AI news", "backstory": "An expert analyst.", "allow_delegation": False}
    starter_task = {"description": "Summarize top 3 AI papers from ArXiv.", "expected_output": "A 3-paragraph summary.", "agent_index": 0}
    if "agents" not in st.session_state:
        st.session_state["agents"] = [starter_agent]
    if "tasks" not in st.session_state:
        st.session_state["tasks"] = [starter_task]
+
185
def load_config_from_file(uploaded_file):
    """Parse an uploaded JSON config and install it into session state.

    Only the JSON parsing is guarded: ``st.rerun()`` works by raising an
    internal control-flow exception, so it must not run inside a broad
    ``except Exception`` block — the previous version swallowed the rerun
    and reported a successful load as "Error reading file".
    """
    try:
        data = json.load(uploaded_file)
    except Exception as e:
        st.error(f"Error reading file: {e}")
        return

    if "agents" in data and "tasks" in data:
        st.session_state.agents = data["agents"]
        st.session_state.tasks = data["tasks"]
        st.success("Configuration loaded successfully!")
        st.rerun()
    else:
        st.error("Invalid configuration format.")
+
198
def main():
    """Render the full Streamlit UI.

    Layout: sidebar (config import/export + engine settings), then four tabs
    for agent editing, task editing, live crew execution, and CLI script
    export. All widget state is persisted via st.session_state.
    """
    st.set_page_config(page_title="FallnAI AgentBuilder Pro", layout="wide", page_icon="⚙️")
    init_session_state()

    st.title("FallnAI AgentBuilder Pro v2.0")
    st.markdown("Design, manage, and deploy advanced multi-agentic systems.")

    # Sidebar: Global Settings & File Management
    with st.sidebar:
        st.header("📁 Configuration Manager")

        # File Upload
        uploaded_file = st.file_uploader("Import JSON Config", type=["json"])
        if uploaded_file is not None:
            if st.button("Load Configuration"):
                load_config_from_file(uploaded_file)

        # File Export — serializes the live session state so a crew design
        # can be round-tripped through "Import JSON Config".
        current_config = {
            "agents": st.session_state.agents,
            "tasks": st.session_state.tasks
        }
        st.download_button(
            label="💾 Export JSON Config",
            data=json.dumps(current_config, indent=4),
            file_name="fallnai_crew_config.json",
            mime="application/json",
            use_container_width=True
        )

        st.divider()

        st.header("🧠 Engine Settings")
        hf_token = st.text_input("Hugging Face API Token", type="password")
        model_id = st.text_input("Model ID", value="meta-llama/Llama-3.1-8B-Instruct")
        temperature = st.slider("Temperature", 0.0, 1.0, 0.7, 0.1)

        st.subheader("Crew Dynamics")
        process_type = st.radio("Process Strategy", ["sequential", "hierarchical"])
        enable_memory = st.checkbox("Enable Memory (Requires Embeddings)", value=False)
        enable_cache = st.checkbox("Enable Caching", value=True)

    tab1, tab2, tab3, tab4 = st.tabs(["👥 Define Agents", "📝 Define Tasks", "🚀 Run Crew", "💻 CLI Deploy Script"])

    # TAB 1: AGENT CONFIGURATION
    with tab1:
        st.subheader("Agent Configuration")
        for i, agent in enumerate(st.session_state.agents):
            with st.expander(f"Agent {i+1}: {agent.get('role', 'New')}", expanded=False):
                col1, col2 = st.columns([3, 1])
                with col1:
                    # Widgets write straight back into session state; keys are
                    # suffixed with the index so Streamlit keeps them distinct.
                    st.session_state.agents[i]['role'] = st.text_input("Role", value=agent.get('role', ''), key=f"role_{i}")
                    st.session_state.agents[i]['goal'] = st.text_input("Goal", value=agent.get('goal', ''), key=f"goal_{i}")
                    st.session_state.agents[i]['backstory'] = st.text_area("Backstory", value=agent.get('backstory', ''), key=f"bs_{i}")
                with col2:
                    st.session_state.agents[i]['allow_delegation'] = st.checkbox("Allow Delegation", value=agent.get('allow_delegation', False), key=f"del_{i}")
                    st.markdown("<br><br>", unsafe_allow_html=True)
                    if st.button("🗑️ Remove Agent", key=f"rem_agent_{i}", use_container_width=True):
                        # rerun() immediately restarts the script, so mutating
                        # the list mid-iteration is safe here.
                        st.session_state.agents.pop(i)
                        st.rerun()

        if st.button("➕ Add New Agent", type="secondary"):
            st.session_state.agents.append({"role": "New Agent", "goal": "", "backstory": "", "allow_delegation": False})
            st.rerun()

    # TAB 2: TASK CONFIGURATION
    with tab2:
        st.subheader("Task Execution Sequence")
        agent_roles = [a['role'] for a in st.session_state.agents]

        for i, task in enumerate(st.session_state.tasks):
            with st.expander(f"Task {i+1}", expanded=False):
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.session_state.tasks[i]['description'] = st.text_area("Description", value=task.get('description', ''), key=f"tdesc_{i}")
                    st.session_state.tasks[i]['expected_output'] = st.text_input("Expected Output", value=task.get('expected_output', ''), key=f"tout_{i}")
                with col2:
                    # Clamp the stored index in case agents were removed since
                    # this task was last assigned.
                    current_idx = task.get('agent_index', 0)
                    safe_idx = min(current_idx, len(agent_roles) - 1) if agent_roles else 0

                    st.session_state.tasks[i]['agent_index'] = st.selectbox(
                        "Assign to Agent",
                        options=range(len(agent_roles)),
                        format_func=lambda x: agent_roles[x] if agent_roles else "No Agents",
                        index=safe_idx,
                        key=f"t_agent_{i}"
                    )
                    st.markdown("<br><br>", unsafe_allow_html=True)
                    if st.button("🗑️ Remove Task", key=f"rem_task_{i}", use_container_width=True):
                        st.session_state.tasks.pop(i)
                        st.rerun()

        if st.button("➕ Add New Task", type="secondary"):
            st.session_state.tasks.append({"description": "", "expected_output": "", "agent_index": 0})
            st.rerun()

    # TAB 3: EXECUTION
    with tab3:
        st.subheader("Live Execution")

        if not hf_token:
            st.warning("⚠️ Please provide a Hugging Face API Token in the sidebar to run the crew.")
        else:
            if st.button("🚀 Execute Swarm", use_container_width=True, type="primary"):
                try:
                    # Validation — pydantic raises if any field is missing or
                    # too short; the error surfaces via the except below.
                    config_data = CrewConfigSchema(
                        agents=[AgentSchema(**a) for a in st.session_state.agents],
                        tasks=[TaskSchema(**t) for t in st.session_state.tasks],
                        process_type=process_type,
                        memory=enable_memory,
                        cache=enable_cache
                    )

                    log_container = st.container()
                    log_container.markdown("### Runtime Logs")
                    log_block = log_container.empty()

                    # Mirror CrewAI's stdout chatter into the UI while it runs.
                    with st.spinner(f"Initializing distributed run using {model_id}..."):
                        with st_capture_stdout(log_block):
                            result = CrewProcessor.build_and_run(config_data, hf_token, model_id, temperature)

                    st.success("✅ Execution Completed Successfully!")
                    st.markdown("### Output Artifact")
                    st.markdown(result)

                    st.download_button(
                        label="📥 Download Output Artifact (.md)",
                        data=str(result),
                        file_name="fallnai_output.md",
                        mime="text/markdown",
                        use_container_width=True
                    )

                except Exception as e:
                    st.error(f"Execution Error: {str(e)}")

    # TAB 4: CLI SCRIPT EXPORT
    with tab4:
        st.subheader("Standalone Python Script")
        st.markdown("Export your configuration as a runnable Python script. Ideal for environments like Termux or dedicated server instances.")

        cli_code = generate_standalone_script(current_config, model_id, temperature, process_type)
        st.code(cli_code, language="python")

        st.download_button(
            label="🐍 Download Python Script",
            data=cli_code,
            file_name="run_crew.py",
            mime="text/x-python",
            use_container_width=True
        )

if __name__ == "__main__":
    main()