File size: 5,994 Bytes
7fafb5b
7dc7c0d
7fafb5b
 
 
 
 
 
 
 
 
 
 
774fa4b
 
 
7dc7c0d
 
 
7fafb5b
 
774fa4b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0a3f5f
 
 
7fafb5b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4d61f97
7dc7c0d
 
 
 
 
 
7fafb5b
d0a3f5f
 
 
7fafb5b
774fa4b
7fafb5b
 
 
d0a3f5f
7fafb5b
 
 
d0a3f5f
7fafb5b
 
d0a3f5f
 
 
 
 
 
 
 
7fafb5b
d0a3f5f
 
7fafb5b
d0a3f5f
 
 
 
 
 
 
 
 
7fafb5b
7dc7c0d
7fafb5b
 
7dc7c0d
 
 
 
4d61f97
 
 
7dc7c0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4d61f97
 
 
 
 
 
 
 
 
 
 
 
7fafb5b
7dc7c0d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4d61f97
 
 
 
 
 
 
 
 
 
 
 
7dc7c0d
 
 
 
 
4d61f97
 
7dc7c0d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200

# CORE CONVERSATION ENGINE β€” core/conversation_engine.py

from core.agent_selector import rule_based_selection, llm_based_selection

from agents.jung_agent import jung_agent, jung_followup_agent
from agents.dream_agent import dream_agent, dream_confirm, dream_interpret
from agents.shadow_agent import shadow_response
from agents.myth_agent import myth_response
from agents.epistemic_agent import epistemic_response
from agents.bpsy_agent import bpsy_response
from agents.jred_agent import jred_response

from core.tools.web_search import web_search
from core.tools.search_trigger import needs_web_search

# IMPORT USER PROFILE UPDATER
from core.conversation_manager import update_user_profile

def handle_turn(state, user_input):
    """Process a single conversation turn and build the response payload for the UI.

    Flow: optionally augment with web search results, route the input to the
    currently active agent, persist the exchange into global and per-agent
    histories, and best-effort update the longitudinal user profile.

    Args:
        state: Mutable session state object. Must provide ``add_user_input``,
            ``add_system_output`` and a ``history`` list; other attributes
            (``current_agent``, ``stage``, ``web_context``, ``agent_histories``,
            ``user_profile``) are created lazily here as needed.
        user_input: Raw text of the user's message for this turn.

    Returns:
        dict with keys ``type``, ``agent``, ``content``, ``next_stage``,
        ``agent_log`` and ``user_profile`` — or a terminal
        ``{"type": "end", ...}`` dict when the user types "end session".
    """

    # -----------------------------
    # OPTIONAL WEB SEARCH AUGMENT
    # -----------------------------
    if needs_web_search(user_input):
        print("🌐 Triggering web search...")
        search_results = web_search(user_input)

        if search_results:
            # Flatten the result list into one text block for prompt injection.
            state.web_context = "\n\n".join(
                f"{r['title']}\n{r['content']}" for r in search_results
            )
        else:
            state.web_context = None
        # NOTE(review): when no search triggers, web_context from a previous
        # turn is deliberately left in place — confirm that is intended.

    # πŸ”’ FORCE CURRENT AGENT CONTROL
    # Default the routing target to the Jung agent on a fresh session.
    if getattr(state, "current_agent", None) is None:
        state.current_agent = "jung"

    # -----------------------------
    # END SESSION
    # -----------------------------
    if user_input.strip().lower() == "end session":
        return {
            "type": "end",
            "content": "Session ended. You can start a new conversation anytime.",
            "next_stage": None
        }

    # -----------------------------
    # INITIALIZE STAGE
    # -----------------------------
    if getattr(state, "stage", None) is None:
        state.stage = "jung"

    # -----------------------------
    # STORE USER INPUT INTO HISTORY
    # -----------------------------
    # BUGFIX: the original called add_user_input() twice back-to-back,
    # duplicating every user message in state.history. Record it exactly once.
    state.add_user_input(user_input)

    # Current flow control - Route the flow based on current agent
    stage = state.stage
    agent = state.current_agent

    print(f"🧠 Current Stage: {stage}")

    # -----------------------------
    # JUNG FLOW
    # -----------------------------
    # jung/dream agents receive the full state; the rest only see this turn's
    # raw input (they are stateless single-shot responders).
    if agent == "jung":
        print("\n🟑 ENTERING JUNG AGENT")
        output = jung_agent(state)

    elif agent == "dream":
        output = dream_agent(state)

    elif agent == "shadow":
        output = shadow_response(user_input)

    elif agent == "myth":
        output = myth_response(user_input)

    elif agent == "epistemic":
        output = epistemic_response(user_input)

    elif agent == "bpsy":
        output = bpsy_response(user_input)

    elif agent == "jred":
        output = jred_response(user_input)

    else:
        output = {
            "type": "error",
            "content": "Unknown agent."
        }

    # -----------------------------
    # STORE SYSTEM OUTPUT
    # -----------------------------
    state.add_system_output(output["content"])

    # -----------------------------
    # UPDATE NEXT STAGE
    # -----------------------------
    # Agents that don't set next_stage (e.g. the error dict) yield None.
    state.stage = output.get("next_stage")

    # ---------------------------------------------------
    # πŸ”₯ UPDATE LONGITUDINAL USER PROFILE / MEMORY
    # ---------------------------------------------------
    # This allows the system to:
    # - detect recurring themes
    # - identify psychological patterns
    # - accumulate user tendencies
    # - support future epistemic analysis
    # - improve continuity across sessions
    # ---------------------------------------------------
    # Best-effort: a profiling failure must never break the turn itself.
    try:
        update_user_profile(
            state=state,
            user_input=user_input,
            jung_output=output["content"]
        )

        print("🧠 User profile updated:")
        print(state.user_profile)

    except Exception as e:
        print("❌ USER PROFILE UPDATE FAILED:", e)

    # -----------------------------
    # STORE PER-AGENT MEMORY
    # -----------------------------
    # (current_agent is guaranteed set by the guard near the top; the
    # original re-checked it here redundantly.)
    if not hasattr(state, "agent_histories"):
        state.agent_histories = {}

    if state.current_agent not in state.agent_histories:
        state.agent_histories[state.current_agent] = []

    # ---------------------------------------------------
    # STORE CURRENT TURN INTO AGENT-SPECIFIC MEMORY
    # ---------------------------------------------------
    state.agent_histories[state.current_agent].append({
        "role": "client",
        "text": user_input
    })

    state.agent_histories[state.current_agent].append({
        "role": "agent",
        "text": output["content"]
    })

    # -----------------------------
    # BACKEND LOGGING
    # -----------------------------
    print("\nπŸ“œ UPDATED CONVERSATION HISTORY:")

    for msg in state.history:
        print(f"{msg['role'].upper()}: {msg['content']}")

    print("-" * 50)

    # -----------------------------
    # FINAL RESPONSE TO UI
    # -----------------------------
    return {
        "type": output["type"],
        "agent": state.current_agent,
        "content": output["content"],
        "next_stage": output.get("next_stage"),
        "agent_log": {
            "agent": state.current_agent,
            "history": state.agent_histories[state.current_agent]
        },
        # πŸ”₯ OPTIONAL
        # Can later be shown in Epistemic UI
        "user_profile": getattr(state, "user_profile", {})
    }