Spaces:
Sleeping
Sleeping
| # FallnAI AgentBuilder Pro v2.0 | |
| import streamlit as st | |
| import sys | |
| import io | |
| import re | |
| import json | |
| import contextlib | |
| from typing import List, Optional, Dict, Any | |
| from pydantic import BaseModel, Field | |
| from crewai import Agent, Task, Crew, Process, LLM | |
| # --- CONFIGURATION SCHEMAS --- | |
class AgentSchema(BaseModel):
    """Validated definition of one CrewAI agent as entered in the UI."""
    # min_length guards reject empty/placeholder form fields at validation time.
    role: str = Field(..., min_length=2)
    goal: str = Field(..., min_length=5)
    backstory: str = Field(..., min_length=5)
    # Whether this agent may delegate work to other agents in the crew.
    allow_delegation: bool = False
    # Forwarded to crewai.Agent(verbose=...) when the crew is built.
    verbose: bool = True
class TaskSchema(BaseModel):
    """Validated definition of one CrewAI task as entered in the UI."""
    description: str = Field(..., min_length=5)
    expected_output: str = Field(..., min_length=5)
    # Position of the assigned agent in CrewConfigSchema.agents. Bounds are
    # not checked here — CrewProcessor indexes agents[agent_index] directly,
    # so an out-of-range value raises IndexError at build time.
    agent_index: int
class CrewConfigSchema(BaseModel):
    """Top-level crew configuration: agents, tasks, and runtime options."""
    agents: List[AgentSchema]
    tasks: List[TaskSchema]
    # Either "sequential" or "hierarchical"; anything else falls back to
    # sequential in CrewProcessor.build_and_run.
    process_type: str = "sequential"
    # Forwarded to crewai.Crew(memory=..., cache=...).
    memory: bool = False
    cache: bool = True
| # --- LOGGING INTERFACE --- | |
class StreamlitRedirect(io.StringIO):
    """StringIO stand-in for stdout that mirrors writes into a Streamlit
    container as a live-updating code block."""

    def __init__(self, container):
        super().__init__()
        # Streamlit element exposing .code(); receives the full log on each write.
        self.container = container
        # Accumulated, ANSI-free log text rendered so far.
        self.text = ""

    def write(self, data):
        """Append *data* (minus ANSI escape sequences) to the buffer and
        refresh the container. Returns len(data) per the stream contract."""
        sanitized = re.sub(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])', '', data)
        if not sanitized:
            # Pure control sequences (or empty input): nothing to render.
            return len(data)
        self.text += sanitized
        self.container.code(self.text, language='text')
        return len(data)
@contextlib.contextmanager
def st_capture_stdout(container):
    """Context manager to capture stdout and update a Streamlit code block.

    FIX: the @contextlib.contextmanager decorator was missing. A bare
    generator function has no __enter__/__exit__, so the
    `with st_capture_stdout(...)` call in the execution tab would raise
    at runtime. `contextlib` was already imported and otherwise unused,
    confirming the original intent.

    Yields:
        The StreamlitRedirect instance, so callers can inspect `.text`.
    """
    redirector = StreamlitRedirect(container)
    old_stdout = sys.stdout
    sys.stdout = redirector
    try:
        yield redirector
    finally:
        # Always restore stdout, even if the body raises.
        sys.stdout = old_stdout
| # --- THE ENGINE LAYER --- | |
class CrewProcessor:
    """Transforms validated UI data into runnable CrewAI objects using Hugging Face."""

    @staticmethod
    def build_and_run(config: CrewConfigSchema, hf_token: str, model_id: str, temperature: float) -> str:
        """Build agents/tasks/crew from *config*, run it, and return the result.

        FIX: declared @staticmethod. The original def had no `self` parameter,
        which only worked because the call site accesses it through the class
        (`CrewProcessor.build_and_run(...)`); calling it on an instance would
        have raised a TypeError.

        Args:
            config: Validated crew configuration.
            hf_token: Hugging Face API token, passed as the LLM api_key.
            model_id: HF model id; prefixed "huggingface/" for provider routing.
            temperature: Sampling temperature forwarded to the LLM.

        Returns:
            The final crew output as a string (CrewOutput.raw when available).

        Raises:
            IndexError: if a task's agent_index is outside config.agents.
        """
        # Initialize LLM using the Hugging Face provider.
        custom_llm = LLM(
            model=f"huggingface/{model_id}",
            api_key=hf_token,
            temperature=temperature
        )
        # 1. Build Agents — every agent shares the same LLM instance.
        agents = []
        for a_conf in config.agents:
            agents.append(Agent(
                role=a_conf.role,
                goal=a_conf.goal,
                backstory=a_conf.backstory,
                allow_delegation=a_conf.allow_delegation,
                verbose=a_conf.verbose,
                llm=custom_llm
            ))
        # 2. Build Tasks, resolving agent_index against the list built above.
        tasks = []
        for t_conf in config.tasks:
            assigned_agent = agents[t_conf.agent_index]
            tasks.append(Task(
                description=t_conf.description,
                expected_output=t_conf.expected_output,
                agent=assigned_agent
            ))
        # 3. Formulate Crew — unknown process_type falls back to sequential.
        process_map = {
            "sequential": Process.sequential,
            "hierarchical": Process.hierarchical
        }
        crew = Crew(
            agents=agents,
            tasks=tasks,
            process=process_map.get(config.process_type, Process.sequential),
            # Hierarchical runs need a manager LLM; sequential must not get one.
            manager_llm=custom_llm if config.process_type == "hierarchical" else None,
            verbose=True,
            memory=config.memory,
            cache=config.cache
        )
        # Kickoff returns a CrewOutput object; .raw contains the string result.
        result = crew.kickoff()
        return str(result.raw) if hasattr(result, 'raw') else str(result)
| # --- CODE GENERATOR --- | |
def generate_standalone_script(config_dict: dict, model_id: str, temperature: float, process_type: str) -> str:
    """Generates a standalone Python script for CLI execution.

    FIX: the agent `role` was emitted as a plain double-quoted string
    (`role="..."`), so any role containing a double quote produced a
    syntactically invalid script. It now uses triple quotes, consistent
    with how goal/backstory/description were already emitted.

    Args:
        config_dict: {"agents": [...], "tasks": [...]} in session-state shape.
        model_id: Hugging Face model id embedded into the generated script.
        temperature: LLM temperature embedded into the generated script.
        process_type: "sequential" or "hierarchical".

    Returns:
        Source text of a self-contained script (reads HF_TOKEN from the env).
    """
    script = f'''# Auto-generated FallnAI CrewAI Script
import os
from crewai import Agent, Task, Crew, Process, LLM

# Ensure your HF_TOKEN is set in your environment variables:
# export HF_TOKEN="your_token_here"

def main():
    llm = LLM(
        model="huggingface/{model_id}",
        api_key=os.environ.get("HF_TOKEN"),
        temperature={temperature}
    )
'''
    script += "\n    # --- AGENTS ---\n"
    for i, a in enumerate(config_dict['agents']):
        # Triple-quoted values keep free-text fields with quotes/newlines valid.
        script += f'''    agent_{i} = Agent(
        role="""{a['role']}""",
        goal="""{a['goal']}""",
        backstory="""{a['backstory']}""",
        allow_delegation={a['allow_delegation']},
        verbose=True,
        llm=llm
    )\n'''
    script += "\n    # --- TASKS ---\n"
    for i, t in enumerate(config_dict['tasks']):
        script += f'''    task_{i} = Task(
        description="""{t['description']}""",
        expected_output="""{t['expected_output']}""",
        agent=agent_{t['agent_index']}
    )\n'''
    process_enum = "Process.sequential" if process_type == "sequential" else "Process.hierarchical"
    agent_list = ', '.join(f'agent_{i}' for i in range(len(config_dict['agents'])))
    task_list = ', '.join(f'task_{i}' for i in range(len(config_dict['tasks'])))
    # NOTE(review): the rocket emoji in the print below appears mojibake'd in
    # SOURCE ("π ..."); it is a runtime string and is preserved byte-for-byte.
    script += f'''
    # --- CREW ---
    crew = Crew(
        agents=[{agent_list}],
        tasks=[{task_list}],
        process={process_enum},
        manager_llm=llm if "{process_type}" == "hierarchical" else None,
        verbose=True
    )
    print("π Kicking off the crew...")
    result = crew.kickoff()
    print("\\n### FINAL RESULT ###\\n")
    print(getattr(result, 'raw', result))

if __name__ == "__main__":
    main()
'''
    return script
| # --- THE UI LAYER --- | |
def init_session_state():
    """Seed st.session_state with one example agent and one example task,
    but only on first run — existing entries are never overwritten."""
    sample_agent = {
        "role": "Researcher",
        "goal": "Find latest AI news",
        "backstory": "An expert analyst.",
        "allow_delegation": False,
    }
    sample_task = {
        "description": "Summarize top 3 AI papers from ArXiv.",
        "expected_output": "A 3-paragraph summary.",
        "agent_index": 0,
    }
    if "agents" not in st.session_state:
        st.session_state.agents = [sample_agent]
    if "tasks" not in st.session_state:
        st.session_state.tasks = [sample_task]
def load_config_from_file(uploaded_file):
    """Load an agents/tasks JSON config from an uploaded file into session state.

    FIX: st.rerun() was inside the broad `try/except Exception`. Streamlit
    implements rerun by raising an internal RerunException (a subclass of
    Exception), so the original handler swallowed the rerun signal and
    reported it as "Error reading file: ...". Parsing stays guarded; the
    success path (including the rerun) now runs outside the try.
    """
    try:
        data = json.load(uploaded_file)
    except Exception as e:
        # Covers malformed JSON and unreadable uploads.
        st.error(f"Error reading file: {e}")
        return
    if "agents" in data and "tasks" in data:
        st.session_state.agents = data["agents"]
        st.session_state.tasks = data["tasks"]
        st.success("Configuration loaded successfully!")
        st.rerun()
    else:
        st.error("Invalid configuration format.")
def main():
    """Render the whole Streamlit app: sidebar configuration plus four tabs
    (agent editor, task editor, live execution, CLI script export).

    NOTE(review): many emoji in UI label strings appear mojibake'd in the
    source (e.g. "π", "πΎ", "β"); they are runtime strings and are kept
    byte-for-byte — confirm the intended glyphs against the original file.
    """
    st.set_page_config(page_title="FallnAI AgentBuilder Pro", layout="wide", page_icon="βοΈ")
    init_session_state()
    st.title("FallnAI AgentBuilder Pro v2.0")
    st.markdown("Design, manage, and deploy advanced multi-agentic systems.")
    # Sidebar: Global Settings & File Management
    with st.sidebar:
        st.header("π Configuration Manager")
        # File Upload
        uploaded_file = st.file_uploader("Import JSON Config", type=["json"])
        if uploaded_file is not None:
            if st.button("Load Configuration"):
                load_config_from_file(uploaded_file)
        # File Export — snapshot of live session state; also reused by tab4.
        current_config = {
            "agents": st.session_state.agents,
            "tasks": st.session_state.tasks
        }
        st.download_button(
            label="πΎ Export JSON Config",
            data=json.dumps(current_config, indent=4),
            file_name="fallnai_crew_config.json",
            mime="application/json",
            use_container_width=True
        )
        st.divider()
        st.header("π§ Engine Settings")
        hf_token = st.text_input("Hugging Face API Token", type="password")
        model_id = st.text_input("Model ID", value="meta-llama/Llama-3.1-8B-Instruct")
        temperature = st.slider("Temperature", 0.0, 1.0, 0.7, 0.1)
        st.subheader("Crew Dynamics")
        process_type = st.radio("Process Strategy", ["sequential", "hierarchical"])
        enable_memory = st.checkbox("Enable Memory (Requires Embeddings)", value=False)
        enable_cache = st.checkbox("Enable Caching", value=True)
    tab1, tab2, tab3, tab4 = st.tabs(["π₯ Define Agents", "π Define Tasks", "π Run Crew", "π» CLI Deploy Script"])
    # TAB 1: AGENT CONFIGURATION
    with tab1:
        st.subheader("Agent Configuration")
        for i, agent in enumerate(st.session_state.agents):
            with st.expander(f"Agent {i+1}: {agent.get('role', 'New')}", expanded=False):
                col1, col2 = st.columns([3, 1])
                with col1:
                    # Widgets write straight back into session state each rerun.
                    st.session_state.agents[i]['role'] = st.text_input("Role", value=agent.get('role', ''), key=f"role_{i}")
                    st.session_state.agents[i]['goal'] = st.text_input("Goal", value=agent.get('goal', ''), key=f"goal_{i}")
                    st.session_state.agents[i]['backstory'] = st.text_area("Backstory", value=agent.get('backstory', ''), key=f"bs_{i}")
                with col2:
                    st.session_state.agents[i]['allow_delegation'] = st.checkbox("Allow Delegation", value=agent.get('allow_delegation', False), key=f"del_{i}")
                    st.markdown("<br><br>", unsafe_allow_html=True)
                    if st.button("ποΈ Remove Agent", key=f"rem_agent_{i}", use_container_width=True):
                        # pop-while-iterating only works because rerun() aborts
                        # the loop immediately after the mutation.
                        st.session_state.agents.pop(i)
                        st.rerun()
        if st.button("β Add New Agent", type="secondary"):
            st.session_state.agents.append({"role": "New Agent", "goal": "", "backstory": "", "allow_delegation": False})
            st.rerun()
    # TAB 2: TASK CONFIGURATION
    with tab2:
        st.subheader("Task Execution Sequence")
        agent_roles = [a['role'] for a in st.session_state.agents]
        for i, task in enumerate(st.session_state.tasks):
            with st.expander(f"Task {i+1}", expanded=False):
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.session_state.tasks[i]['description'] = st.text_area("Description", value=task.get('description', ''), key=f"tdesc_{i}")
                    st.session_state.tasks[i]['expected_output'] = st.text_input("Expected Output", value=task.get('expected_output', ''), key=f"tout_{i}")
                with col2:
                    # Clamp stale indices (e.g. after agents were removed).
                    current_idx = task.get('agent_index', 0)
                    safe_idx = min(current_idx, len(agent_roles) - 1) if agent_roles else 0
                    st.session_state.tasks[i]['agent_index'] = st.selectbox(
                        "Assign to Agent",
                        options=range(len(agent_roles)),
                        format_func=lambda x: agent_roles[x] if agent_roles else "No Agents",
                        index=safe_idx,
                        key=f"t_agent_{i}"
                    )
                    st.markdown("<br><br>", unsafe_allow_html=True)
                    if st.button("ποΈ Remove Task", key=f"rem_task_{i}", use_container_width=True):
                        st.session_state.tasks.pop(i)
                        st.rerun()
        if st.button("β Add New Task", type="secondary"):
            st.session_state.tasks.append({"description": "", "expected_output": "", "agent_index": 0})
            st.rerun()
    # TAB 3: EXECUTION
    with tab3:
        st.subheader("Live Execution")
        if not hf_token:
            st.warning("β οΈ Please provide a Hugging Face API Token in the sidebar to run the crew.")
        else:
            if st.button("π Execute Swarm", use_container_width=True, type="primary"):
                try:
                    # Validation — pydantic raises on short/missing fields,
                    # surfaced to the user by the except below.
                    config_data = CrewConfigSchema(
                        agents=[AgentSchema(**a) for a in st.session_state.agents],
                        tasks=[TaskSchema(**t) for t in st.session_state.tasks],
                        process_type=process_type,
                        memory=enable_memory,
                        cache=enable_cache
                    )
                    log_container = st.container()
                    log_container.markdown("### Runtime Logs")
                    log_block = log_container.empty()
                    with st.spinner(f"Initializing distributed run using {model_id}..."):
                        # Mirror CrewAI's stdout chatter into the live log block.
                        with st_capture_stdout(log_block):
                            result = CrewProcessor.build_and_run(config_data, hf_token, model_id, temperature)
                    st.success("β Execution Completed Successfully!")
                    st.markdown("### Output Artifact")
                    st.markdown(result)
                    st.download_button(
                        label="π₯ Download Output Artifact (.md)",
                        data=str(result),
                        file_name="fallnai_output.md",
                        mime="text/markdown",
                        use_container_width=True
                    )
                except Exception as e:
                    # Single surface for validation, LLM, and crew errors.
                    st.error(f"Execution Error: {str(e)}")
    # TAB 4: CLI SCRIPT EXPORT
    with tab4:
        st.subheader("Standalone Python Script")
        st.markdown("Export your configuration as a runnable Python script. Ideal for environments like Termux or dedicated server instances.")
        cli_code = generate_standalone_script(current_config, model_id, temperature, process_type)
        st.code(cli_code, language="python")
        st.download_button(
            label="π Download Python Script",
            data=cli_code,
            file_name="run_crew.py",
            mime="text/x-python",
            use_container_width=True
        )

if __name__ == "__main__":
    main()