elisaklunder committed on
Commit
0cc1ea4
·
1 Parent(s): 7ccf897

Fix bugs where messages sometimes disappeared or were duplicated

Browse files
CONTRIBUTING.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing to ml-agent
2
+
3
+ ## Creating a Pull Request
4
+
5
+ ### 1. Create and work on your branch
6
+
7
+ ```bash
8
+ # Create a new branch
9
+ git checkout -b your-feature-branch
10
+
11
+ # Make your changes, then commit
12
+ git add .
13
+ git commit -m "Your commit message"
14
+ ```
15
+
16
+ ### 2. Push your branch to keep changes backed up
17
+
18
+ ```bash
19
+ # First time pushing this branch
20
+ git push --set-upstream origin your-feature-branch
21
+
22
+ # Subsequent pushes
23
+ git push
24
+ ```
25
+
26
+ ### 3. When ready, create a Pull Request
27
+
28
+ ```bash
29
+ # Easy way: Use the helper script
30
+ ./create-pr.sh "Your PR title" "Optional description"
31
+
32
+ # This automatically:
33
+ # - Gets your current branch
34
+ # - Creates the PR
35
+ # - Pushes your changes to it
36
+ ```
37
+
38
+ ### Example workflow
39
+
40
+ ```bash
41
+ # 1. Create branch
42
+ git checkout -b fix-bug-123
43
+
44
+ # 2. Make changes and commit
45
+ git add backend/main.py
46
+ git commit -m "Fix authentication bug"
47
+
48
+ # 3. Push to keep changes safe
49
+ git push --set-upstream origin fix-bug-123
50
+
51
+ # 4. Continue working...
52
+ git add frontend/src/App.tsx
53
+ git commit -m "Update UI"
54
+ git push
55
+
56
+ # 5. When ready, create PR
57
+ ./create-pr.sh "Fix authentication bug" "Fixes issue where users could not authenticate in dev mode"
58
+ ```
59
+
60
+ ## Development Setup
61
+
62
+ ### Running locally with hot-reload
63
+
64
+ **Backend:**
65
+ ```bash
66
+ cd backend
67
+ uv run uvicorn main:app --host 0.0.0.0 --port 7860 --reload
68
+ ```
69
+
70
+ **Frontend** (in a separate terminal):
71
+ ```bash
72
+ cd frontend
73
+ npm run dev
74
+ ```
75
+
76
+ Access the app at http://localhost:5173
77
+
78
+ ### Environment Variables
79
+
80
+ Make sure your `.env` file in the project root contains:
81
+ ```
82
+ ANTHROPIC_API_KEY=your_key_here
83
+ HF_TOKEN=your_hf_token_here
84
+ GITHUB_TOKEN=your_github_token_here
85
+ HF_NAMESPACE=your_namespace_here
86
+ ```
create-pr.sh ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ # Colors for output
5
+ GREEN='\033[0;32m'
6
+ BLUE='\033[0;34m'
7
+ RED='\033[0;31m'
8
+ NC='\033[0m' # No Color
9
+
10
+ # Check arguments
11
+ if [ $# -lt 1 ]; then
12
+ echo -e "${RED}Usage: ./create-pr.sh \"PR Title\" [\"Optional description\"]${NC}"
13
+ echo ""
14
+ echo "Example:"
15
+ echo " ./create-pr.sh \"Fix authentication bug\" \"This fixes the dev mode auth issue\""
16
+ exit 1
17
+ fi
18
+
19
+ TITLE="$1"
20
+ DESCRIPTION="${2:-}"
21
+
22
+ # Get current branch
23
+ BRANCH=$(git rev-parse --abbrev-ref HEAD)
24
+
25
+ if [ "$BRANCH" = "main" ]; then
26
+ echo -e "${RED}Error: You're on the main branch. Please create a feature branch first.${NC}"
27
+ exit 1
28
+ fi
29
+
30
+ echo -e "${BLUE}Creating PR for branch: ${GREEN}$BRANCH${NC}"
31
+ echo -e "${BLUE}Title: ${GREEN}$TITLE${NC}"
32
+
33
+ # Get HF_TOKEN from .env
34
+ if [ ! -f .env ]; then
35
+ echo -e "${RED}Error: .env file not found${NC}"
36
+ exit 1
37
+ fi
38
+
39
+ HF_TOKEN=$(grep HF_TOKEN .env | cut -d '=' -f2)
40
+
41
+ if [ -z "$HF_TOKEN" ]; then
42
+ echo -e "${RED}Error: HF_TOKEN not found in .env${NC}"
43
+ exit 1
44
+ fi
45
+
46
+ # Create PR using HuggingFace API
47
+ echo -e "${BLUE}Creating pull request...${NC}"
48
+
49
+ PR_INFO=$(HF_TOKEN="$HF_TOKEN" uv run python - <<EOF
50
+ from huggingface_hub import HfApi
51
+ import os
52
+
53
+ api = HfApi(token=os.environ.get('HF_TOKEN'))
54
+
55
+ description = """$DESCRIPTION"""
56
+
57
+ discussion = api.create_discussion(
58
+ repo_id='smolagents/ml-agent',
59
+ repo_type='space',
60
+ title="""$TITLE""",
61
+ description=description if description.strip() else "Changes from branch $BRANCH",
62
+ pull_request=True,
63
+ )
64
+
65
+ print(f"{discussion.num}|{discussion.url}")
66
+ EOF
67
+ )
68
+
69
+ PR_NUM=$(echo "$PR_INFO" | cut -d '|' -f1)
70
+ PR_URL=$(echo "$PR_INFO" | cut -d '|' -f2)
71
+
72
+ echo -e "${GREEN}✓ PR created: #$PR_NUM${NC}"
73
+ echo -e "${BLUE}URL: $PR_URL${NC}"
74
+
75
+ # Push branch to PR ref
76
+ echo -e "${BLUE}Pushing changes to PR...${NC}"
77
+ git push -f origin "$BRANCH:refs/pr/$PR_NUM"
78
+
79
+ echo -e "${GREEN}✓ Done! Your PR is ready:${NC}"
80
+ echo -e "${GREEN} $PR_URL${NC}"
frontend/src/hooks/useAgentChat.ts CHANGED
@@ -303,6 +303,16 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
303
  chatActionsRef.current.setMessages = chat.setMessages;
304
  chatActionsRef.current.messages = chat.messages;
305
 
 
 
 
 
 
 
 
 
 
 
306
  // -- Hydrate from backend on mount (page refresh recovery) --------------
307
  useEffect(() => {
308
  let cancelled = false;
@@ -332,7 +342,7 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
332
  if (msgsRes.ok) {
333
  const data = await msgsRes.json();
334
  if (cancelled || !Array.isArray(data) || data.length === 0) return;
335
- const uiMsgs = llmMessagesToUIMessages(data, pendingIds);
336
  if (uiMsgs.length > 0) {
337
  chat.setMessages(uiMsgs);
338
  saveMessages(sessionId, uiMsgs);
@@ -448,12 +458,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
448
  const toolName = event.data?.tool as string;
449
  if (state === 'running' && toolName) sideChannel.onToolRunning(toolName);
450
  } else if (et === 'turn_complete' || et === 'error' || et === 'interrupted') {
 
451
  sideChannel.onProcessingDone();
452
  stopReconnect();
453
  // Final hydration to get the complete message state
454
  const result = await hydrateMessages();
455
  if (result) {
456
- const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds);
 
 
 
 
 
457
  if (uiMsgs.length > 0) {
458
  chat.setMessages(uiMsgs);
459
  saveMessages(sessionId, uiMsgs);
@@ -461,13 +477,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
461
  }
462
  return;
463
  } else if (et === 'approval_required') {
 
464
  sideChannel.onApprovalRequired(
465
  (event.data?.tools || []) as Array<{ tool: string; arguments: Record<string, unknown>; tool_call_id: string }>,
466
  );
467
  stopReconnect();
468
  const result = await hydrateMessages();
469
  if (result) {
470
- const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds);
 
 
 
 
471
  if (uiMsgs.length > 0) {
472
  chat.setMessages(uiMsgs);
473
  saveMessages(sessionId, uiMsgs);
@@ -486,12 +507,18 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
486
  const onVisible = async () => {
487
  if (document.visibilityState !== 'visible') return;
488
 
 
489
  // Always re-hydrate messages on wake
490
  const result = await hydrateMessages();
491
  if (!result) return;
492
 
493
  const { data, pendingIds, info } = result;
494
- const uiMsgs = llmMessagesToUIMessages(data, pendingIds);
 
 
 
 
 
495
  if (uiMsgs.length > 0) {
496
  chat.setMessages(uiMsgs);
497
  saveMessages(sessionId, uiMsgs);
@@ -499,6 +526,7 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
499
 
500
  // If the backend is still processing, reconnect to the live event stream
501
  if (info?.is_processing) {
 
502
  updateSession(sessionId, { isProcessing: true, activityStatus: { type: 'thinking' } });
503
 
504
  // Stop any previous reconnection
@@ -512,15 +540,31 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
512
  // Poll messages every 3 s so the chat message list stays up-to-date
513
  // (the event stream gives us real-time status but not full message diffs)
514
  pollTimerRef.current = setInterval(async () => {
 
515
  const fresh = await hydrateMessages();
516
  if (!fresh) return;
517
- const msgs = llmMessagesToUIMessages(fresh.data, fresh.pendingIds);
518
- if (msgs.length > 0) {
 
 
 
 
 
 
 
 
519
  chat.setMessages(msgs);
520
  saveMessages(sessionId, msgs);
 
 
 
 
 
521
  }
 
522
  // If backend stopped processing, clean up
523
  if (fresh.info && !fresh.info.is_processing) {
 
524
  updateSession(sessionId, { isProcessing: false });
525
  stopReconnect();
526
  }
@@ -540,8 +584,20 @@ export function useAgentChat({ sessionId, isActive, onReady, onError, onSessionD
540
  useEffect(() => {
541
  if (chat.messages.length === 0) return;
542
  if (chat.messages.length !== prevLenRef.current) {
 
 
 
 
 
 
543
  prevLenRef.current = chat.messages.length;
544
  saveMessages(sessionId, chat.messages);
 
 
 
 
 
 
545
  }
546
  }, [sessionId, chat.messages]);
547
 
 
303
  chatActionsRef.current.setMessages = chat.setMessages;
304
  chatActionsRef.current.messages = chat.messages;
305
 
306
+ // -- Debug: track SDK message changes ------------------------------------
307
+ useEffect(() => {
308
+ console.log('[useAgentChat] 📨 SDK messages changed:', {
309
+ sessionId,
310
+ count: chat.messages.length,
311
+ messages: chat.messages,
312
+ lastMessage: chat.messages[chat.messages.length - 1],
313
+ });
314
+ }, [sessionId, chat.messages]);
315
+
316
  // -- Hydrate from backend on mount (page refresh recovery) --------------
317
  useEffect(() => {
318
  let cancelled = false;
 
342
  if (msgsRes.ok) {
343
  const data = await msgsRes.json();
344
  if (cancelled || !Array.isArray(data) || data.length === 0) return;
345
+ const uiMsgs = llmMessagesToUIMessages(data, pendingIds, chatActionsRef.current.messages);
346
  if (uiMsgs.length > 0) {
347
  chat.setMessages(uiMsgs);
348
  saveMessages(sessionId, uiMsgs);
 
458
  const toolName = event.data?.tool as string;
459
  if (state === 'running' && toolName) sideChannel.onToolRunning(toolName);
460
  } else if (et === 'turn_complete' || et === 'error' || et === 'interrupted') {
461
+ console.log('[useAgentChat] 🏁 Turn complete event:', { sessionId, eventType: et });
462
  sideChannel.onProcessingDone();
463
  stopReconnect();
464
  // Final hydration to get the complete message state
465
  const result = await hydrateMessages();
466
  if (result) {
467
+ const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds, chatActionsRef.current.messages);
468
+ console.log('[useAgentChat] 🔄 Final hydration after turn_complete:', {
469
+ sessionId,
470
+ messageCount: uiMsgs.length,
471
+ messages: uiMsgs,
472
+ });
473
  if (uiMsgs.length > 0) {
474
  chat.setMessages(uiMsgs);
475
  saveMessages(sessionId, uiMsgs);
 
477
  }
478
  return;
479
  } else if (et === 'approval_required') {
480
+ console.log('[useAgentChat] 🔔 Approval required event:', { sessionId });
481
  sideChannel.onApprovalRequired(
482
  (event.data?.tools || []) as Array<{ tool: string; arguments: Record<string, unknown>; tool_call_id: string }>,
483
  );
484
  stopReconnect();
485
  const result = await hydrateMessages();
486
  if (result) {
487
+ const uiMsgs = llmMessagesToUIMessages(result.data, result.pendingIds, chatActionsRef.current.messages);
488
+ console.log('[useAgentChat] 🔄 Hydration after approval_required:', {
489
+ sessionId,
490
+ messageCount: uiMsgs.length,
491
+ });
492
  if (uiMsgs.length > 0) {
493
  chat.setMessages(uiMsgs);
494
  saveMessages(sessionId, uiMsgs);
 
507
  const onVisible = async () => {
508
  if (document.visibilityState !== 'visible') return;
509
 
510
+ console.log('[useAgentChat] 👀 Tab became visible, re-hydrating:', { sessionId });
511
  // Always re-hydrate messages on wake
512
  const result = await hydrateMessages();
513
  if (!result) return;
514
 
515
  const { data, pendingIds, info } = result;
516
+ const uiMsgs = llmMessagesToUIMessages(data, pendingIds, chatActionsRef.current.messages);
517
+ console.log('[useAgentChat] 🔄 Wake hydration result:', {
518
+ sessionId,
519
+ messageCount: uiMsgs.length,
520
+ isProcessing: info?.is_processing,
521
+ });
522
  if (uiMsgs.length > 0) {
523
  chat.setMessages(uiMsgs);
524
  saveMessages(sessionId, uiMsgs);
 
526
 
527
  // If the backend is still processing, reconnect to the live event stream
528
  if (info?.is_processing) {
529
+ console.log('[useAgentChat] 🔌 Backend still processing, starting polling:', { sessionId });
530
  updateSession(sessionId, { isProcessing: true, activityStatus: { type: 'thinking' } });
531
 
532
  // Stop any previous reconnection
 
540
  // Poll messages every 3 s so the chat message list stays up-to-date
541
  // (the event stream gives us real-time status but not full message diffs)
542
  pollTimerRef.current = setInterval(async () => {
543
+ console.log('[useAgentChat] ⏱️ Polling tick:', { sessionId });
544
  const fresh = await hydrateMessages();
545
  if (!fresh) return;
546
+ const msgs = llmMessagesToUIMessages(fresh.data, fresh.pendingIds, chatActionsRef.current.messages);
547
+ console.log('[useAgentChat] 🔄 Poll hydration result:', {
548
+ sessionId,
549
+ messageCount: msgs.length,
550
+ currentCount: chatActionsRef.current.messages.length,
551
+ });
552
+
553
+ const currentCount = chatActionsRef.current.messages.length;
554
+ if (msgs.length > currentCount || currentCount === 0) {
555
+ console.log('[useAgentChat] ✅ Applying poll update (backend has more messages)');
556
  chat.setMessages(msgs);
557
  saveMessages(sessionId, msgs);
558
+ } else {
559
+ console.log('[useAgentChat] ⏭️ Skipping poll update (backend state is stale or equal):', {
560
+ backendCount: msgs.length,
561
+ sdkCount: currentCount,
562
+ });
563
  }
564
+
565
  // If backend stopped processing, clean up
566
  if (fresh.info && !fresh.info.is_processing) {
567
+ console.log('[useAgentChat] ⏹️ Backend finished, stopping poll:', { sessionId });
568
  updateSession(sessionId, { isProcessing: false });
569
  stopReconnect();
570
  }
 
584
  useEffect(() => {
585
  if (chat.messages.length === 0) return;
586
  if (chat.messages.length !== prevLenRef.current) {
587
+ console.log('[useAgentChat] 💾 Saving messages (count changed):', {
588
+ sessionId,
589
+ prevCount: prevLenRef.current,
590
+ newCount: chat.messages.length,
591
+ messages: chat.messages,
592
+ });
593
  prevLenRef.current = chat.messages.length;
594
  saveMessages(sessionId, chat.messages);
595
+ } else {
596
+ console.log('[useAgentChat] ⏭️ Skipping save (count unchanged):', {
597
+ sessionId,
598
+ count: chat.messages.length,
599
+ lastMessage: chat.messages[chat.messages.length - 1],
600
+ });
601
  }
602
  }, [sessionId, chat.messages]);
603
 
frontend/src/lib/chat-message-store.ts CHANGED
@@ -38,10 +38,21 @@ function writeAll(map: MessagesMap): void {
38
 
39
  export function loadMessages(sessionId: string): UIMessage[] {
40
  const map = readAll();
41
- return map[sessionId] ?? [];
 
 
 
 
 
 
42
  }
43
 
44
  export function saveMessages(sessionId: string, messages: UIMessage[]): void {
 
 
 
 
 
45
  const map = readAll();
46
  map[sessionId] = messages;
47
 
 
38
 
39
  export function loadMessages(sessionId: string): UIMessage[] {
40
  const map = readAll();
41
+ const messages = map[sessionId] ?? [];
42
+ console.log('[chat-message-store] 📖 Loading messages from localStorage:', {
43
+ sessionId,
44
+ count: messages.length,
45
+ messages,
46
+ });
47
+ return messages;
48
  }
49
 
50
  export function saveMessages(sessionId: string, messages: UIMessage[]): void {
51
+ console.log('[chat-message-store] 💾 Writing messages to localStorage:', {
52
+ sessionId,
53
+ count: messages.length,
54
+ lastMessage: messages[messages.length - 1],
55
+ });
56
  const map = readAll();
57
  map[sessionId] = messages;
58
 
frontend/src/lib/convert-llm-messages.ts CHANGED
@@ -16,19 +16,24 @@ interface LLMMessage {
16
  name?: string | null;
17
  }
18
 
19
- let idCounter = 0;
 
 
20
  function nextId(): string {
21
- return `msg-${Date.now()}-${++idCounter}`;
22
  }
23
 
24
  /**
25
  * @param pendingApprovalIds - Set of tool_call_ids that are waiting for approval.
26
  * When provided, matching tool calls without results will get state
27
  * 'approval-requested' instead of 'input-available'.
 
 
28
  */
29
  export function llmMessagesToUIMessages(
30
  messages: LLMMessage[],
31
  pendingApprovalIds?: Set<string>,
 
32
  ): UIMessage[] {
33
  // Build a map of tool_call_id -> tool result for pairing
34
  const toolResults = new Map<string, { output: string; isError: boolean }>();
@@ -43,13 +48,22 @@ export function llmMessagesToUIMessages(
43
 
44
  const uiMessages: UIMessage[] = [];
45
 
 
 
 
 
 
 
 
46
  for (const msg of messages) {
47
  if (msg.role === 'system') continue;
48
  if (msg.role === 'tool') continue; // handled via tool_calls pairing
49
 
50
  if (msg.role === 'user') {
 
 
51
  uiMessages.push({
52
- id: nextId(),
53
  role: 'user',
54
  parts: [{ type: 'text', text: msg.content || '' }],
55
  });
@@ -107,10 +121,23 @@ export function llmMessagesToUIMessages(
107
  // per LLM API call), so merge consecutive assistant messages to match.
108
  const prev = uiMessages[uiMessages.length - 1];
109
  if (prev && prev.role === 'assistant') {
 
 
 
 
 
110
  prev.parts.push(...parts);
111
  } else {
 
 
 
 
 
 
 
 
112
  uiMessages.push({
113
- id: nextId(),
114
  role: 'assistant',
115
  parts,
116
  });
 
16
  name?: string | null;
17
  }
18
 
19
+ // Generate stable IDs based on message position to prevent duplicate renders
20
+ // when the same message is re-converted multiple times (e.g., during polling)
21
+ let uiMessageCounter = 0;
22
  function nextId(): string {
23
+ return `msg-${++uiMessageCounter}`;
24
  }
25
 
26
  /**
27
  * @param pendingApprovalIds - Set of tool_call_ids that are waiting for approval.
28
  * When provided, matching tool calls without results will get state
29
  * 'approval-requested' instead of 'input-available'.
30
+ * @param existingUIMessages - Current UI messages to preserve IDs when content matches.
31
+ * This prevents React from re-rendering messages with new IDs during polling.
32
  */
33
  export function llmMessagesToUIMessages(
34
  messages: LLMMessage[],
35
  pendingApprovalIds?: Set<string>,
36
+ existingUIMessages?: UIMessage[],
37
  ): UIMessage[] {
38
  // Build a map of tool_call_id -> tool result for pairing
39
  const toolResults = new Map<string, { output: string; isError: boolean }>();
 
48
 
49
  const uiMessages: UIMessage[] = [];
50
 
51
+ // Helper to get existing message ID at a given position if roles match
52
+ const getExistingId = (index: number, role: 'user' | 'assistant'): string | null => {
53
+ if (!existingUIMessages || index >= existingUIMessages.length) return null;
54
+ const existing = existingUIMessages[index];
55
+ return existing.role === role ? existing.id : null;
56
+ };
57
+
58
  for (const msg of messages) {
59
  if (msg.role === 'system') continue;
60
  if (msg.role === 'tool') continue; // handled via tool_calls pairing
61
 
62
  if (msg.role === 'user') {
63
+ // Try to reuse existing ID if the message at this position matches
64
+ const existingId = getExistingId(uiMessages.length, 'user');
65
  uiMessages.push({
66
+ id: existingId || nextId(),
67
  role: 'user',
68
  parts: [{ type: 'text', text: msg.content || '' }],
69
  });
 
121
  // per LLM API call), so merge consecutive assistant messages to match.
122
  const prev = uiMessages[uiMessages.length - 1];
123
  if (prev && prev.role === 'assistant') {
124
+ console.log('[convert-llm] 🔗 Merging consecutive assistant messages:', {
125
+ prevId: prev.id,
126
+ prevPartCount: prev.parts.length,
127
+ newPartCount: parts.length,
128
+ });
129
  prev.parts.push(...parts);
130
  } else {
131
+ // Try to reuse existing ID if the message at this position matches
132
+ const existingId = getExistingId(uiMessages.length, 'assistant');
133
+ const newId = existingId || nextId();
134
+ console.log('[convert-llm] ➕ Creating new assistant message:', {
135
+ id: newId,
136
+ partCount: parts.length,
137
+ reusedId: !!existingId,
138
+ });
139
  uiMessages.push({
140
+ id: newId,
141
  role: 'assistant',
142
  parts,
143
  });