File size: 2,368 Bytes
1f725d8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import logging
import asyncio
from dotenv import load_dotenv
from langgraph.graph import StateGraph,START,END
from src.Blog.models.State_model import State
from src.Blog.graph.nodes.router_node import router_node,route_next
from src.Blog.graph.nodes.reducer_node import reducer_node
from src.Blog.graph.nodes.search_node import research_node
from src.Blog.graph.nodes.orchaster_node import orchestrator_node
from src.Blog.graph.nodes.worker_node import worker_node
from src.Blog.graph.nodes.fanout_node import fanout
load_dotenv()

# Assemble the blog-generation workflow as a LangGraph state machine.
builder = StateGraph(State)

# Register every processing stage under its routing name.
for node_name, node_fn in (
    ("router", router_node),
    ("research", research_node),
    ("orchestrator", orchestrator_node),
    ("worker", worker_node),
    ("reducer", reducer_node),
):
    builder.add_node(node_name, node_fn)

# Entry point: the router decides whether a research pass is needed first.
builder.add_edge(START, "router")
builder.add_conditional_edges(
    "router",
    route_next,
    {"research": "research", "orchestrator": "orchestrator"},
)
builder.add_edge("research", "orchestrator")

# The orchestrator fans out work to worker invocations; the reducer joins
# their outputs before the graph terminates.
builder.add_conditional_edges("orchestrator", fanout, ["worker"])
builder.add_edge("worker", "reducer")
builder.add_edge("reducer", END)

app = builder.compile()


# Best-effort: render a Mermaid PNG of the compiled graph for documentation.
# NOTE(review): draw_mermaid_png() typically calls an external rendering
# service, so a network/service failure here must not abort start-up — the
# diagram is purely informational.
try:
    png_data = app.get_graph().draw_mermaid_png()
    with open("graph.png", "wb") as f:
        f.write(png_data)
except Exception:
    logging.exception("Could not render graph.png; continuing without diagram")
async def run(topic: str):
    """Stream the blog-generation graph for *topic*.

    Async generator: with ``stream_mode="values"`` the graph emits the full
    state snapshot after every node, and each snapshot is yielded so callers
    can surface progress incrementally. Any error raised during execution is
    logged (with traceback) and re-raised.

    Args:
        topic: Subject of the blog post to generate.

    Yields:
        The graph state after each executed node.
    """
    logging.info("Starting blog generation for topic: %s", topic)
    initial_state = {
        "topic": topic,
        "mode": "",
        "needs_research": False,
        "queries": [],
        "evidence": [],
        "plan": None,
        "sections": [],
        "final": "",
    }
    try:
        # "values" mode yields complete state snapshots rather than per-node
        # deltas, which is what downstream consumers expect.
        async for step in app.astream(initial_state, stream_mode="values"):
            yield step
        logging.info("Blog generation completed successfully")
    except Exception:
        # logging.exception captures the full traceback, not just str(e).
        logging.exception("Error during graph execution")
        raise

if __name__ == "__main__":
    from logger import *
    out=asyncio.run(run("State of Multimodal LLMs in 2026"))
    print(out)