File size: 4,472 Bytes
0c591a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
from langgraph.graph import StateGraph
from langchain_core.runnables import RunnableLambda
from src.state import AgentState
from src.nodes.researcher import researcher_node
from src.nodes.analyzer import analyzer_node
from src.nodes.critic import critic_node
from src.nodes.editor import editor_node
from src.utils.conditions import should_continue
from langsmith import traceable

# Build the cyclic (self-correcting) workflow over the shared AgentState.
workflow = StateGraph(AgentState)

# Register the four agent nodes; each node function is wrapped in a
# RunnableLambda so LangGraph treats it as a runnable step.
workflow.add_node("Researcher", RunnableLambda(researcher_node))
workflow.add_node("Analyzer", RunnableLambda(analyzer_node))
workflow.add_node("Critic", RunnableLambda(critic_node))
workflow.add_node("Editor", RunnableLambda(editor_node))

# Linear path into the quality loop: Researcher -> Analyzer -> Critic.
workflow.set_entry_point("Researcher")
workflow.add_edge("Researcher", "Analyzer")
workflow.add_edge("Analyzer", "Critic")

# Self-correcting loop: after the Critic runs, `should_continue` decides
# whether the report is accepted ("exit" -> END) or needs another pass
# through the Editor ("retry").
workflow.add_conditional_edges(
    "Critic",
    should_continue,
    {
        "exit": "__end__",
        "retry": "Editor",
    },
)

# Close the loop: every Editor revision is re-scored by the Critic.
workflow.add_edge("Editor", "Critic")

# BUG FIX: the original also called workflow.set_finish_point("Critic"),
# which adds an *unconditional* Critic -> END edge alongside the
# conditional edges above — so END was reached even on the "retry"
# branch. Termination is already handled by the "exit" mapping, so the
# redundant finish point has been removed.

# NOTE(review): StateGraph does not read a `.config` attribute — this
# assignment only stores metadata on the Python object and is not applied
# to compiled runs automatically. Kept for reference; TODO confirm nothing
# else in the project reads it before deleting.
workflow.config = {
    "project_name": "AI-strategy-agent-cyclic",
    "tags": ["self-correcting", "quality-loop", "swot-analysis"],
    "metadata": {
        "version": "1.0",
        "environment": "development",
        "workflow_type": "researcher-analyzer-critic-editor"
    }
}

# Compile the graph into an invokable application.
app = workflow.compile()

# Execution wrapper; the @traceable decorator tags the run for LangSmith.
@traceable(name="Run - Self-Correcting SWOT Analysis", tags=["cyclic", "quality-control", "demo"], metadata={"purpose": "iterative_improvement"})
def run_self_correcting_workflow(company_name="Tesla", strategy_focus="Cost Leadership", workflow_id=None, progress_store=None):
    """Execute the complete self-correcting SWOT analysis workflow"""

    # Seed the graph state: inputs from the caller, everything else at
    # its neutral starting value (no data, no draft, zero revisions).
    seed_state = dict(
        company_name=company_name,
        strategy_focus=strategy_focus,
        raw_data=None,
        draft_report=None,
        critique=None,
        revision_count=0,
        messages=[],
        score=0,
        data_source="live",
        provider_used=None,
        sources_failed=[],
        workflow_id=workflow_id,
        progress_store=progress_store,
        error=None,  # Set when LLM providers fail
    )

    # The same identifiers are also threaded through the runnable config
    # so nodes can report progress for this specific run.
    run_config = {
        "configurable": {
            "workflow_id": workflow_id,
            "progress_store": progress_store,
        }
    }

    return app.invoke(seed_state, config=run_config)

# Main execution
if __name__ == "__main__":
    # Test with Tesla as the default company
    target_company = "Tesla"
    
    print(f"🔍 Running Self-Correcting SWOT Analysis for {target_company}...")
    print("📝 This workflow includes: Researcher → Analyzer → Critic → Editor (loop)")
    print("🎯 Loop continues until score ≥ 7 or 3 revisions attempted\n")
    
    # Execute the workflow
    result = run_self_correcting_workflow(target_company)
    
    # Display results (with safe fallbacks)
    print(f"🏁 Analysis completed for {target_company}!")
    final_score = result.get('score', 'N/A')
    final_revision_count = result.get('revision_count', 0)
    final_critique = result.get('critique', 'No critique available')
    # BUG FIX: the original indexed result['draft_report'] directly, which
    # raises KeyError if the key is missing and TypeError on len(None)
    # when a provider failure leaves the report unset. Use the same safe
    # fallback style as the fields above.
    final_report = result.get('draft_report') or 'No report generated'
    
    print(f"📊 Final Score: {final_score}/10")
    print(f"🔄 Revision Count: {final_revision_count}")
    print(f"💬 Critique: {final_critique}")
    print(f"\n📄 Final SWOT Analysis:")
    print(final_report)
    
    # Summary
    print(f"\n✅ Self-Correcting Workflow Summary:")
    print(f"   - Company: {target_company}")
    print(f"   - Initial Quality: Improved from unknown to {final_score}/10")
    print(f"   - Revisions Made: {final_revision_count}")
    print(f"   - Final Report Length: {len(final_report)} characters")
    print(f"   - Workflow: Researcher → Analyzer → Critic → Editor (loop)")
    print(f"   - Tracing: Enhanced LangSmith traces available")
    
    # Quality assessment: numeric score >= 7 counts as a pass; anything
    # else (including a non-numeric 'N/A') is reported as the loop having
    # hit its revision cap.
    if isinstance(final_score, (int, float)) and final_score >= 7:
        print(f"   - Quality Assessment: ✅ PASSED ({final_score}/10)")
    else:
        print(f"   - Quality Assessment: ⚠️  ACCEPTABLE ({final_score} - max revisions reached)")