| |
| import gradio as gr |
|
|
class MobileUXHandlers:
    """Gradio event handlers that tailor chat processing to mobile vs. desktop.

    Mobile clients (detected from the request's User-Agent header) receive
    streamed progress updates, a token-capped orchestrator call, and responses
    reformatted into short HTML-wrapped paragraphs for small screens.
    """

    def __init__(self, orchestrator):
        """Store the backend used to answer chat requests.

        Args:
            orchestrator: Object exposing an async ``process_request`` method
                (see ``_mobile_optimized_processing`` for the expected call).
        """
        self.orchestrator = orchestrator
        # Placeholder for per-session mobile UX state; not yet populated.
        self.mobile_state = {}

    async def handle_mobile_submit(self, message, chat_history, session_id,
                                   show_reasoning, show_agent_trace, request: "gr.Request"):
        """Route a chat submission to the mobile or desktop pipeline.

        Yields dicts of component updates keyed by "chatbot", "message_input",
        "reasoning_display" and "performance_display".

        BUG FIX: the original declared this as a plain coroutine and did
        ``return await self._mobile_optimized_processing(...)``, but that
        method is an async *generator* — awaiting it raises
        ``TypeError: object async_generator can't be used in 'await' expression``.
        This handler is now itself an async generator and forwards every
        intermediate update, which Gradio supports for streaming handlers.
        The ``gr.Request`` annotation is written as a string so the class can
        be defined even where the annotation would be evaluated eagerly.
        """
        user_agent = request.headers.get("user-agent", "").lower()
        is_mobile = any(device in user_agent for device in ['mobile', 'android', 'iphone'])

        if is_mobile:
            # Stream each intermediate UI update ("Thinking..." then the answer).
            async for update in self._mobile_optimized_processing(
                message, chat_history, session_id, show_reasoning, show_agent_trace
            ):
                yield update
        else:
            # Desktop path produces a single update.
            yield await self._desktop_processing(
                message, chat_history, session_id, show_reasoning, show_agent_trace
            )

    async def _mobile_optimized_processing(self, message, chat_history, session_id,
                                           show_reasoning, show_agent_trace):
        """Async generator: yield a progress update, then the final (or error) one.

        On failure the user's message is restored to the input box so it can be
        resent without retyping.
        """
        try:
            # Immediate feedback while the orchestrator works.
            yield {
                "chatbot": chat_history + [[message, "Thinking..."]],
                "message_input": "",
                "reasoning_display": {"status": "processing"},
                "performance_display": {"status": "processing"}
            }

            # Token cap keeps mobile answers short and fast.
            result = await self.orchestrator.process_request(
                session_id=session_id,
                user_input=message,
                mobile_optimized=True,
                max_tokens=800
            )

            # Only pass reasoning/trace through when the user asked for them.
            formatted_response = self._format_for_mobile(
                result['final_response'],
                show_reasoning and result.get('metadata', {}).get('reasoning_chain'),
                show_agent_trace and result.get('agent_trace')
            )

            updated_history = chat_history + [[message, formatted_response]]

            yield {
                "chatbot": updated_history,
                "message_input": "",
                "reasoning_display": result.get('metadata', {}).get('reasoning_chain', {}),
                "performance_display": result.get('performance_metrics', {})
            }

        except Exception as e:
            # Surface a friendly error and keep the original message editable.
            error_response = self._get_mobile_friendly_error(e)
            yield {
                "chatbot": chat_history + [[message, error_response]],
                "message_input": message,
                "reasoning_display": {"error": "Processing failed"},
                "performance_display": {"error": str(e)}
            }

    def _format_for_mobile(self, response, reasoning_chain, agent_trace):
        """Wrap *response* in mobile-friendly HTML, optionally appending reasoning.

        Args:
            response: Plain-text answer from the orchestrator.
            reasoning_chain: Falsy to omit; a dict whose ``chain_of_thought``
                holds step dicts (first step's ``hypothesis`` is shown), or any
                other truthy value, which is stringified and truncated to 200
                characters.
            agent_trace: Accepted for interface symmetry; currently unused here.

        NOTE(review): *response* is interpolated into HTML without escaping,
        so any markup in the model output renders verbatim — confirm this is
        intended (otherwise apply html.escape before interpolation).
        """
        # Break long answers into shorter paragraphs for small screens.
        if len(response) > 400:
            paragraphs = self._split_into_paragraphs(response, max_length=300)
            response = "\n\n".join(paragraphs)

        formatted = f"""
        <div class="mobile-response">
            {response}
        </div>
        """

        if reasoning_chain:
            if isinstance(reasoning_chain, dict):
                # Structured chain: show only the first step's hypothesis.
                chain_of_thought = reasoning_chain.get('chain_of_thought', {})
                if chain_of_thought:
                    first_step = list(chain_of_thought.values())[0]
                    hypothesis = first_step.get('hypothesis', 'Processing...')
                    reasoning_text = f"Hypothesis: {hypothesis}"
                else:
                    reasoning_text = "Enhanced reasoning chain available"
            else:
                # Unstructured: truncate to keep the mobile card compact.
                reasoning_text = str(reasoning_chain)[:200]

            formatted += f"""
        <div class="reasoning-mobile" style="margin-top: 15px; padding: 10px; background: #f5f5f5; border-radius: 8px; font-size: 14px;">
            <strong>Reasoning:</strong> {reasoning_text}...
        </div>
        """

        return formatted

    def _get_mobile_friendly_error(self, error):
        """Map an exception to a short, user-friendly mobile error message."""
        error_messages = {
            "timeout": "⏱️ Taking longer than expected. Please try a simpler question.",
            "network": "📡 Connection issue. Check your internet and try again.",
            "rate_limit": "🚦 Too many requests. Please wait a moment.",
            "default": "❌ Something went wrong. Please try again."
        }

        # Classify by substring of the exception text (lower-cased once).
        text = str(error).lower()
        if "timeout" in text:
            error_type = "timeout"
        elif "network" in text or "connection" in text:
            error_type = "network"
        elif "rate" in text:
            error_type = "rate_limit"
        else:
            error_type = "default"

        return error_messages[error_type]

    async def _desktop_processing(self, message, chat_history, session_id,
                                  show_reasoning, show_agent_trace):
        """Desktop placeholder: returns a single no-op update.

        NOTE(review): this drops *message* entirely (history is returned
        unchanged and the input is cleared) — presumably a stub awaiting the
        real desktop pipeline; confirm before shipping.
        """
        return {
            "chatbot": chat_history,
            "message_input": "",
            "reasoning_display": {},
            "performance_display": {}
        }

    def _split_into_paragraphs(self, text, max_length=300):
        """Greedily split *text* on word boundaries into chunks.

        Each chunk stays at or under ``max_length`` characters unless a single
        word alone exceeds the limit, in which case that word forms its own
        (oversized) chunk.

        BUG FIX: the original appended an empty string whenever the first word
        of a chunk already exceeded ``max_length``; flushing now requires at
        least two words in the current chunk.
        """
        words = text.split()
        paragraphs = []
        current_para = []

        for word in words:
            current_para.append(word)
            # Flush all but the newest word once the limit is crossed — but
            # never flush an empty prefix (single overlong word case).
            if len(' '.join(current_para)) > max_length and len(current_para) > 1:
                paragraphs.append(' '.join(current_para[:-1]))
                current_para = [current_para[-1]]

        if current_para:
            paragraphs.append(' '.join(current_para))

        return paragraphs
|
|