elisaklunder committed on
Commit
4b58d20
·
1 Parent(s): 18509d0

Fix bugs where messages sometimes disappear or get duplicated

Browse files
CONTRIBUTING.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing to ml-agent
2
+
3
+ ## Creating a Pull Request
4
+
5
+ ### 1. Create and work on your branch
6
+
7
+ ```bash
8
+ # Create a new branch
9
+ git checkout -b your-feature-branch
10
+
11
+ # Make your changes, then commit
12
+ git add .
13
+ git commit -m "Your commit message"
14
+ ```
15
+
16
+ ### 2. Push your branch to keep changes backed up
17
+
18
+ ```bash
19
+ # First time pushing this branch
20
+ git push --set-upstream origin your-feature-branch
21
+
22
+ # Subsequent pushes
23
+ git push
24
+ ```
25
+
26
+ ### 3. When ready, create a Pull Request
27
+
28
+ ```bash
29
+ # Easy way: Use the helper script
30
+ ./create-pr.sh "Your PR title" "Optional description"
31
+
32
+ # This automatically:
33
+ # - Gets your current branch
34
+ # - Creates the PR
35
+ # - Pushes your changes to it
36
+ ```
37
+
38
+ ### Example workflow
39
+
40
+ ```bash
41
+ # 1. Create branch
42
+ git checkout -b fix-bug-123
43
+
44
+ # 2. Make changes and commit
45
+ git add backend/main.py
46
+ git commit -m "Fix authentication bug"
47
+
48
+ # 3. Push to keep changes safe
49
+ git push --set-upstream origin fix-bug-123
50
+
51
+ # 4. Continue working...
52
+ git add frontend/src/App.tsx
53
+ git commit -m "Update UI"
54
+ git push
55
+
56
+ # 5. When ready, create PR
57
+ ./create-pr.sh "Fix authentication bug" "Fixes issue where users could not authenticate in dev mode"
58
+ ```
59
+
60
+ ## Development Setup
61
+
62
+ ### Running locally with hot-reload
63
+
64
+ **Backend:**
65
+ ```bash
66
+ cd backend
67
+ uv run uvicorn main:app --host 0.0.0.0 --port 7860 --reload
68
+ ```
69
+
70
+ **Frontend** (in a separate terminal):
71
+ ```bash
72
+ cd frontend
73
+ npm run dev
74
+ ```
75
+
76
+ Access the app at http://localhost:5173
77
+
78
+ ### Environment Variables
79
+
80
+ Make sure your `.env` file in the project root contains:
81
+ ```
82
+ ANTHROPIC_API_KEY=your_key_here
83
+ HF_TOKEN=your_hf_token_here
84
+ GITHUB_TOKEN=your_github_token_here
85
+ HF_NAMESPACE=your_namespace_here
86
+ ```
create-pr.sh ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# create-pr.sh — open a Hugging Face pull request for the current branch
# and push the branch to the PR ref.
#
# Usage: ./create-pr.sh "PR Title" ["Optional description"]
set -e

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check arguments
if [ $# -lt 1 ]; then
  echo -e "${RED}Usage: ./create-pr.sh \"PR Title\" [\"Optional description\"]${NC}"
  echo ""
  echo "Example:"
  echo "  ./create-pr.sh \"Fix authentication bug\" \"This fixes the dev mode auth issue\""
  exit 1
fi

TITLE="$1"
DESCRIPTION="${2:-}"

# Get current branch
BRANCH=$(git rev-parse --abbrev-ref HEAD)

if [ "$BRANCH" = "main" ]; then
  echo -e "${RED}Error: You're on the main branch. Please create a feature branch first.${NC}"
  exit 1
fi

echo -e "${BLUE}Creating PR for branch: ${GREEN}$BRANCH${NC}"
echo -e "${BLUE}Title: ${GREEN}$TITLE${NC}"

# Get HF_TOKEN from .env
if [ ! -f .env ]; then
  echo -e "${RED}Error: .env file not found${NC}"
  exit 1
fi

# Anchor to the exact variable name and keep everything after the first '='.
# (A bare `grep HF_TOKEN | cut -d '=' -f2` also matches lines that merely
# mention HF_TOKEN, and truncates token values containing '='.)
HF_TOKEN=$(grep -m1 '^HF_TOKEN=' .env | cut -d '=' -f2-)

if [ -z "$HF_TOKEN" ]; then
  echo -e "${RED}Error: HF_TOKEN not found in .env${NC}"
  exit 1
fi

# Create PR using HuggingFace API.
# Title/description/branch are passed through the environment rather than
# interpolated into the Python source: the heredoc delimiter is quoted
# ('EOF'), so quotes or triple-quotes in user input can neither break the
# generated program nor inject code into it.
echo -e "${BLUE}Creating pull request...${NC}"

PR_INFO=$(HF_TOKEN="$HF_TOKEN" PR_TITLE="$TITLE" PR_DESCRIPTION="$DESCRIPTION" PR_BRANCH="$BRANCH" uv run python - <<'EOF'
from huggingface_hub import HfApi
import os

api = HfApi(token=os.environ.get('HF_TOKEN'))

description = os.environ.get('PR_DESCRIPTION', '')
branch = os.environ.get('PR_BRANCH', '')

discussion = api.create_discussion(
    repo_id='smolagents/ml-agent',
    repo_type='space',
    title=os.environ['PR_TITLE'],
    description=description if description.strip() else f"Changes from branch {branch}",
    pull_request=True,
)

# Emit "num|url" on a single line for the shell to parse.
print(f"{discussion.num}|{discussion.url}")
EOF
)

PR_NUM=$(echo "$PR_INFO" | cut -d '|' -f1)
PR_URL=$(echo "$PR_INFO" | cut -d '|' -f2)

echo -e "${GREEN}✓ PR created: #$PR_NUM${NC}"
echo -e "${BLUE}URL: $PR_URL${NC}"

# Push branch to PR ref
echo -e "${BLUE}Pushing changes to PR...${NC}"
git push -f origin "$BRANCH:refs/pr/$PR_NUM"

echo -e "${GREEN}✓ Done! Your PR is ready:${NC}"
echo -e "${GREEN}  $PR_URL${NC}"
frontend/src/hooks/useAgentChat.ts CHANGED
@@ -331,6 +331,16 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
331
  chatActionsRef.current.setMessages = chat.setMessages;
332
  chatActionsRef.current.messages = chat.messages;
333
 
 
 
 
 
 
 
 
 
 
 
334
  // -- Hydrate from backend on mount (page refresh recovery) --------------
335
  useEffect(() => {
336
  let cancelled = false;
@@ -360,7 +370,7 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
360
  if (msgsRes.ok) {
361
  const data = await msgsRes.json();
362
  if (cancelled || !Array.isArray(data) || data.length === 0) return;
363
- const uiMsgs = llmMessagesToUIMessages(data, pendingIds);
364
  if (uiMsgs.length > 0) {
365
  chat.setMessages(uiMsgs);
366
  saveMessages(sessionId, uiMsgs);
@@ -476,12 +486,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
476
  const toolName = event.data?.tool as string;
477
  if (state === 'running' && toolName) sideChannel.onToolRunning(toolName);
478
  } else if (et === 'turn_complete' || et === 'error' || et === 'interrupted') {
 
479
  sideChannel.onProcessingDone();
480
  stopReconnect();
481
  // Final hydration to get the complete message state
482
  const result = await hydrateMessages();
483
  if (result) {
484
- const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds);
 
 
 
 
 
485
  if (uiMsgs.length > 0) {
486
  chat.setMessages(uiMsgs);
487
  saveMessages(sessionId, uiMsgs);
@@ -489,13 +505,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
489
  }
490
  return;
491
  } else if (et === 'approval_required') {
 
492
  sideChannel.onApprovalRequired(
493
  (event.data?.tools || []) as Array<{ tool: string; arguments: Record<string, unknown>; tool_call_id: string }>,
494
  );
495
  stopReconnect();
496
  const result = await hydrateMessages();
497
  if (result) {
498
- const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds);
 
 
 
 
499
  if (uiMsgs.length > 0) {
500
  chat.setMessages(uiMsgs);
501
  saveMessages(sessionId, uiMsgs);
@@ -514,12 +535,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
514
  const onVisible = async () => {
515
  if (document.visibilityState !== 'visible') return;
516
 
 
517
  // Always re-hydrate messages on wake
518
  const result = await hydrateMessages();
519
  if (!result) return;
520
 
521
  const { data, pendingIds, info } = result;
522
- const uiMsgs = llmMessagesToUIMessages(data, pendingIds);
 
 
 
 
 
523
  if (uiMsgs.length > 0) {
524
  chat.setMessages(uiMsgs);
525
  saveMessages(sessionId, uiMsgs);
@@ -527,6 +554,7 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
527
 
528
  // If the backend is still processing, reconnect to the live event stream
529
  if (info?.is_processing) {
 
530
  updateSession(sessionId, { isProcessing: true, activityStatus: { type: 'thinking' } });
531
 
532
  // Stop any previous reconnection
@@ -540,15 +568,31 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
540
  // Poll messages every 3 s so the chat message list stays up-to-date
541
  // (the event stream gives us real-time status but not full message diffs)
542
  pollTimerRef.current = setInterval(async () => {
 
543
  const fresh = await hydrateMessages();
544
  if (!fresh) return;
545
- const msgs = llmMessagesToUIMessages(fresh.data, fresh.pendingIds);
546
- if (msgs.length > 0) {
 
 
 
 
 
 
 
 
547
  chat.setMessages(msgs);
548
  saveMessages(sessionId, msgs);
 
 
 
 
 
549
  }
 
550
  // If backend stopped processing, clean up
551
  if (fresh.info && !fresh.info.is_processing) {
 
552
  updateSession(sessionId, { isProcessing: false });
553
  stopReconnect();
554
  }
@@ -568,8 +612,20 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
568
  useEffect(() => {
569
  if (chat.messages.length === 0) return;
570
  if (chat.messages.length !== prevLenRef.current) {
 
 
 
 
 
 
571
  prevLenRef.current = chat.messages.length;
572
  saveMessages(sessionId, chat.messages);
 
 
 
 
 
 
573
  }
574
  }, [sessionId, chat.messages]);
575
 
 
331
  chatActionsRef.current.setMessages = chat.setMessages;
332
  chatActionsRef.current.messages = chat.messages;
333
 
334
+ // -- Debug: track SDK message changes ------------------------------------
335
+ useEffect(() => {
336
+ console.log('[useAgentChat] 📨 SDK messages changed:', {
337
+ sessionId,
338
+ count: chat.messages.length,
339
+ messages: chat.messages,
340
+ lastMessage: chat.messages[chat.messages.length - 1],
341
+ });
342
+ }, [sessionId, chat.messages]);
343
+
344
  // -- Hydrate from backend on mount (page refresh recovery) --------------
345
  useEffect(() => {
346
  let cancelled = false;
 
370
  if (msgsRes.ok) {
371
  const data = await msgsRes.json();
372
  if (cancelled || !Array.isArray(data) || data.length === 0) return;
373
+ const uiMsgs = llmMessagesToUIMessages(data, pendingIds, chatActionsRef.current.messages);
374
  if (uiMsgs.length > 0) {
375
  chat.setMessages(uiMsgs);
376
  saveMessages(sessionId, uiMsgs);
 
486
  const toolName = event.data?.tool as string;
487
  if (state === 'running' && toolName) sideChannel.onToolRunning(toolName);
488
  } else if (et === 'turn_complete' || et === 'error' || et === 'interrupted') {
489
+ console.log('[useAgentChat] 🏁 Turn complete event:', { sessionId, eventType: et });
490
  sideChannel.onProcessingDone();
491
  stopReconnect();
492
  // Final hydration to get the complete message state
493
  const result = await hydrateMessages();
494
  if (result) {
495
+ const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds, chatActionsRef.current.messages);
496
+ console.log('[useAgentChat] 🔄 Final hydration after turn_complete:', {
497
+ sessionId,
498
+ messageCount: uiMsgs.length,
499
+ messages: uiMsgs,
500
+ });
501
  if (uiMsgs.length > 0) {
502
  chat.setMessages(uiMsgs);
503
  saveMessages(sessionId, uiMsgs);
 
505
  }
506
  return;
507
  } else if (et === 'approval_required') {
508
+ console.log('[useAgentChat] 🔔 Approval required event:', { sessionId });
509
  sideChannel.onApprovalRequired(
510
  (event.data?.tools || []) as Array<{ tool: string; arguments: Record<string, unknown>; tool_call_id: string }>,
511
  );
512
  stopReconnect();
513
  const result = await hydrateMessages();
514
  if (result) {
515
+ const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds, chatActionsRef.current.messages);
516
+ console.log('[useAgentChat] 🔄 Hydration after approval_required:', {
517
+ sessionId,
518
+ messageCount: uiMsgs.length,
519
+ });
520
  if (uiMsgs.length > 0) {
521
  chat.setMessages(uiMsgs);
522
  saveMessages(sessionId, uiMsgs);
 
535
  const onVisible = async () => {
536
  if (document.visibilityState !== 'visible') return;
537
 
538
+ console.log('[useAgentChat] 👀 Tab became visible, re-hydrating:', { sessionId });
539
  // Always re-hydrate messages on wake
540
  const result = await hydrateMessages();
541
  if (!result) return;
542
 
543
  const { data, pendingIds, info } = result;
544
+ const uiMsgs = llmMessagesToUIMessages(data, pendingIds, chatActionsRef.current.messages);
545
+ console.log('[useAgentChat] 🔄 Wake hydration result:', {
546
+ sessionId,
547
+ messageCount: uiMsgs.length,
548
+ isProcessing: info?.is_processing,
549
+ });
550
  if (uiMsgs.length > 0) {
551
  chat.setMessages(uiMsgs);
552
  saveMessages(sessionId, uiMsgs);
 
554
 
555
  // If the backend is still processing, reconnect to the live event stream
556
  if (info?.is_processing) {
557
+ console.log('[useAgentChat] 🔌 Backend still processing, starting polling:', { sessionId });
558
  updateSession(sessionId, { isProcessing: true, activityStatus: { type: 'thinking' } });
559
 
560
  // Stop any previous reconnection
 
568
  // Poll messages every 3 s so the chat message list stays up-to-date
569
  // (the event stream gives us real-time status but not full message diffs)
570
  pollTimerRef.current = setInterval(async () => {
571
+ console.log('[useAgentChat] ⏱️ Polling tick:', { sessionId });
572
  const fresh = await hydrateMessages();
573
  if (!fresh) return;
574
+ const msgs = llmMessagesToUIMessages(fresh.data, fresh.pendingIds, chatActionsRef.current.messages);
575
+ console.log('[useAgentChat] 🔄 Poll hydration result:', {
576
+ sessionId,
577
+ messageCount: msgs.length,
578
+ currentCount: chatActionsRef.current.messages.length,
579
+ });
580
+
581
+ const currentCount = chatActionsRef.current.messages.length;
582
+ if (msgs.length > currentCount || currentCount === 0) {
583
+ console.log('[useAgentChat] ✅ Applying poll update (backend has more messages)');
584
  chat.setMessages(msgs);
585
  saveMessages(sessionId, msgs);
586
+ } else {
587
+ console.log('[useAgentChat] ⏭️ Skipping poll update (backend state is stale or equal):', {
588
+ backendCount: msgs.length,
589
+ sdkCount: currentCount,
590
+ });
591
  }
592
+
593
  // If backend stopped processing, clean up
594
  if (fresh.info && !fresh.info.is_processing) {
595
+ console.log('[useAgentChat] ⏹️ Backend finished, stopping poll:', { sessionId });
596
  updateSession(sessionId, { isProcessing: false });
597
  stopReconnect();
598
  }
 
612
  useEffect(() => {
613
  if (chat.messages.length === 0) return;
614
  if (chat.messages.length !== prevLenRef.current) {
615
+ console.log('[useAgentChat] 💾 Saving messages (count changed):', {
616
+ sessionId,
617
+ prevCount: prevLenRef.current,
618
+ newCount: chat.messages.length,
619
+ messages: chat.messages,
620
+ });
621
  prevLenRef.current = chat.messages.length;
622
  saveMessages(sessionId, chat.messages);
623
+ } else {
624
+ console.log('[useAgentChat] ⏭️ Skipping save (count unchanged):', {
625
+ sessionId,
626
+ count: chat.messages.length,
627
+ lastMessage: chat.messages[chat.messages.length - 1],
628
+ });
629
  }
630
  }, [sessionId, chat.messages]);
631
 
frontend/src/lib/chat-message-store.ts CHANGED
@@ -38,10 +38,21 @@ function writeAll(map: MessagesMap): void {
38
 
39
  export function loadMessages(sessionId: string): UIMessage[] {
40
  const map = readAll();
41
- return map[sessionId] ?? [];
 
 
 
 
 
 
42
  }
43
 
44
  export function saveMessages(sessionId: string, messages: UIMessage[]): void {
 
 
 
 
 
45
  const map = readAll();
46
  map[sessionId] = messages;
47
 
 
38
 
39
  export function loadMessages(sessionId: string): UIMessage[] {
40
  const map = readAll();
41
+ const messages = map[sessionId] ?? [];
42
+ console.log('[chat-message-store] 📖 Loading messages from localStorage:', {
43
+ sessionId,
44
+ count: messages.length,
45
+ messages,
46
+ });
47
+ return messages;
48
  }
49
 
50
  export function saveMessages(sessionId: string, messages: UIMessage[]): void {
51
+ console.log('[chat-message-store] 💾 Writing messages to localStorage:', {
52
+ sessionId,
53
+ count: messages.length,
54
+ lastMessage: messages[messages.length - 1],
55
+ });
56
  const map = readAll();
57
  map[sessionId] = messages;
58
 
frontend/src/lib/convert-llm-messages.ts CHANGED
@@ -16,19 +16,24 @@ interface LLMMessage {
16
  name?: string | null;
17
  }
18
 
19
- let idCounter = 0;
 
 
20
  function nextId(): string {
21
- return `msg-${Date.now()}-${++idCounter}`;
22
  }
23
 
24
  /**
25
  * @param pendingApprovalIds - Set of tool_call_ids that are waiting for approval.
26
  * When provided, matching tool calls without results will get state
27
  * 'approval-requested' instead of 'input-available'.
 
 
28
  */
29
  export function llmMessagesToUIMessages(
30
  messages: LLMMessage[],
31
  pendingApprovalIds?: Set<string>,
 
32
  ): UIMessage[] {
33
  // Build a map of tool_call_id -> tool result for pairing
34
  const toolResults = new Map<string, { output: string; isError: boolean }>();
@@ -43,13 +48,22 @@ export function llmMessagesToUIMessages(
43
 
44
  const uiMessages: UIMessage[] = [];
45
 
 
 
 
 
 
 
 
46
  for (const msg of messages) {
47
  if (msg.role === 'system') continue;
48
  if (msg.role === 'tool') continue; // handled via tool_calls pairing
49
 
50
  if (msg.role === 'user') {
 
 
51
  uiMessages.push({
52
- id: nextId(),
53
  role: 'user',
54
  parts: [{ type: 'text', text: msg.content || '' }],
55
  });
@@ -107,10 +121,23 @@ export function llmMessagesToUIMessages(
107
  // per LLM API call), so merge consecutive assistant messages to match.
108
  const prev = uiMessages[uiMessages.length - 1];
109
  if (prev && prev.role === 'assistant') {
 
 
 
 
 
110
  prev.parts.push(...parts);
111
  } else {
 
 
 
 
 
 
 
 
112
  uiMessages.push({
113
- id: nextId(),
114
  role: 'assistant',
115
  parts,
116
  });
 
16
  name?: string | null;
17
  }
18
 
19
+ // Generate stable IDs based on message position to prevent duplicate renders
20
+ // when the same message is re-converted multiple times (e.g., during polling)
21
+ let uiMessageCounter = 0;
22
  function nextId(): string {
23
+ return `msg-${++uiMessageCounter}`;
24
  }
25
 
26
  /**
27
  * @param pendingApprovalIds - Set of tool_call_ids that are waiting for approval.
28
  * When provided, matching tool calls without results will get state
29
  * 'approval-requested' instead of 'input-available'.
30
+ * @param existingUIMessages - Current UI messages to preserve IDs when content matches.
31
+ * This prevents React from re-rendering messages with new IDs during polling.
32
  */
33
  export function llmMessagesToUIMessages(
34
  messages: LLMMessage[],
35
  pendingApprovalIds?: Set<string>,
36
+ existingUIMessages?: UIMessage[],
37
  ): UIMessage[] {
38
  // Build a map of tool_call_id -> tool result for pairing
39
  const toolResults = new Map<string, { output: string; isError: boolean }>();
 
48
 
49
  const uiMessages: UIMessage[] = [];
50
 
51
+ // Helper to get existing message ID at a given position if roles match
52
+ const getExistingId = (index: number, role: 'user' | 'assistant'): string | null => {
53
+ if (!existingUIMessages || index >= existingUIMessages.length) return null;
54
+ const existing = existingUIMessages[index];
55
+ return existing.role === role ? existing.id : null;
56
+ };
57
+
58
  for (const msg of messages) {
59
  if (msg.role === 'system') continue;
60
  if (msg.role === 'tool') continue; // handled via tool_calls pairing
61
 
62
  if (msg.role === 'user') {
63
+ // Try to reuse existing ID if the message at this position matches
64
+ const existingId = getExistingId(uiMessages.length, 'user');
65
  uiMessages.push({
66
+ id: existingId || nextId(),
67
  role: 'user',
68
  parts: [{ type: 'text', text: msg.content || '' }],
69
  });
 
121
  // per LLM API call), so merge consecutive assistant messages to match.
122
  const prev = uiMessages[uiMessages.length - 1];
123
  if (prev && prev.role === 'assistant') {
124
+ console.log('[convert-llm] 🔗 Merging consecutive assistant messages:', {
125
+ prevId: prev.id,
126
+ prevPartCount: prev.parts.length,
127
+ newPartCount: parts.length,
128
+ });
129
  prev.parts.push(...parts);
130
  } else {
131
+ // Try to reuse existing ID if the message at this position matches
132
+ const existingId = getExistingId(uiMessages.length, 'assistant');
133
+ const newId = existingId || nextId();
134
+ console.log('[convert-llm] ➕ Creating new assistant message:', {
135
+ id: newId,
136
+ partCount: parts.length,
137
+ reusedId: !!existingId,
138
+ });
139
  uiMessages.push({
140
+ id: newId,
141
  role: 'assistant',
142
  parts,
143
  });