| |
| |
|
|
| import streamlit as st |
| import re |
| import io |
| from io import BytesIO |
| import pandas as pd |
| import numpy as np |
| import time |
| import matplotlib.pyplot as plt |
| from datetime import datetime, timedelta |
| from spacy import displacy |
| import random |
| import base64 |
| import seaborn as sns |
| import logging |
|
|
| |
| |
| from ..database.semantic_mongo_db import get_student_semantic_analysis |
| from ..database.discourse_mongo_db import get_student_discourse_analysis |
| from ..database.chat_mongo_db import get_chat_history |
| from ..database.current_situation_mongo_db import get_current_situation_analysis |
| from ..database.claude_recommendations_mongo_db import get_claude_recommendations |
| from ..database.semantic_mongo_live_db import get_student_semantic_live_analysis |
|
|
| |
| from ..utils.widget_utils import generate_unique_key |
|
|
| logger = logging.getLogger(__name__) |
|
|
| |
|
|
def display_student_activities(username: str, lang_code: str, t: dict):
    """
    Render every recorded activity for a student, grouped into four tabs.

    Args:
        username: Student identifier used to look up stored records.
        lang_code: Language code (not used directly here; kept for callers).
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        # One tab per activity source; order must match the handlers below.
        live_tab, semantic_tab, discourse_tab, chat_tab = st.tabs([
            t.get('semantic_live_activities', 'Registros de análisis en vivo'),
            t.get('semantic_activities', 'Registros de mis análisis semánticos'),
            t.get('discourse_activities', 'Registros de mis análisis comparado de textos'),
            t.get('chat_activities', 'Registros de mis conversaciones con el tutor virtual')
        ])

        with live_tab:
            display_semantic_live_activities(username, t)

        with semantic_tab:
            display_semantic_activities(username, t)

        with discourse_tab:
            display_discourse_activities(username, t)

        with chat_tab:
            display_chat_activities(username, t)

    except Exception as e:
        logger.error(f"Error mostrando actividades: {str(e)}")
        st.error(t.get('error_loading_activities', 'Error al cargar las actividades'))
|
|
|
|
| |
|
|
def display_semantic_live_activities(username: str, t: dict):
    """Render the student's live semantic-analysis records, one expander per record."""
    try:
        analyses = get_student_semantic_live_analysis(username)

        if not analyses:
            st.info(t.get('no_semantic_live_analyses', 'No hay análisis semánticos en vivo registrados'))
            return

        for analysis in analyses:
            try:
                # Timestamps are stored as ISO-8601 with a trailing 'Z'.
                parsed_ts = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                date_label = parsed_ts.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {date_label}", expanded=False):
                    # Show at most the first 200 characters of the analyzed text.
                    full_text = analysis.get('text', '')
                    preview = full_text[:200] + ("..." if len(full_text) > 200 else "")
                    st.text_area(
                        "Texto analizado",
                        value=preview,
                        height=100,
                        disabled=True
                    )

                    graph_payload = analysis.get('concept_graph')
                    if graph_payload:
                        try:
                            # The graph may be stored as base64 text or raw bytes.
                            if isinstance(graph_payload, str):
                                graph_bytes = base64.b64decode(graph_payload)
                            else:
                                graph_bytes = graph_payload

                            st.image(
                                graph_bytes,
                                caption=t.get('concept_network', 'Red de Conceptos'),
                                use_container_width=True
                            )
                        except Exception as img_error:
                            logger.error(f"Error procesando gráfico: {str(img_error)}")
                            st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))

            except Exception as e:
                # A malformed record must not break the remaining ones.
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis semántico en vivo: {str(e)}")
        st.error(t.get('error_semantic_live', 'Error al mostrar análisis semántico en vivo'))
| |
| |
def display_semantic_activities(username: str, t: dict):
    """Render the student's stored semantic-analysis records with their concept graphs."""
    try:
        logger.info(f"Recuperando análisis semántico para {username}")
        analyses = get_student_semantic_analysis(username)

        if not analyses:
            logger.info("No se encontraron análisis semánticos")
            st.info(t.get('no_semantic_analyses', 'No hay análisis semánticos registrados'))
            return

        logger.info(f"Procesando {len(analyses)} análisis semánticos")

        required_keys = ('timestamp', 'concept_graph')
        for analysis in analyses:
            try:
                # Skip records that lack the fields needed for rendering.
                if any(key not in analysis for key in required_keys):
                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
                    continue

                # Timestamps are stored as ISO-8601 with a trailing 'Z'.
                parsed_ts = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                date_label = parsed_ts.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {date_label}", expanded=False):
                    graph_payload = analysis.get('concept_graph')
                    if graph_payload:
                        try:
                            logger.debug("Decodificando gráfico de conceptos")
                            # Stored either as raw bytes or base64 text.
                            if isinstance(graph_payload, bytes):
                                graph_bytes = graph_payload
                            else:
                                graph_bytes = base64.b64decode(graph_payload)

                            logger.debug(f"Longitud de bytes de imagen: {len(graph_bytes)}")

                            st.image(
                                graph_bytes,
                                caption=t.get('concept_network', 'Red de Conceptos'),
                                use_container_width=True
                            )
                            logger.debug("Gráfico mostrado exitosamente")

                        except Exception as img_error:
                            logger.error(f"Error procesando gráfico: {str(img_error)}")
                            st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
                    else:
                        st.info(t.get('no_graph', 'No hay visualización disponible'))

            except Exception as e:
                # A malformed record must not break the remaining ones.
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis semántico: {str(e)}")
        st.error(t.get('error_semantic', 'Error al mostrar análisis semántico'))
|
|
|
|
| |
|
|
def _concepts_html(concepts) -> str:
    """Build the horizontal concept-chip strip for a list of (concept, freq) pairs."""
    chips = ''.join(
        f'<div style="background-color: white; border-radius: 4px; padding: 6px 10px; display: inline-flex; align-items: center; gap: 4px; box-shadow: 0 1px 2px rgba(0,0,0,0.1); flex-shrink: 0;">'
        f'<span style="font-weight: 500; color: #1f2937; font-size: 0.85em;">{concept}</span>'
        f'<span style="color: #6b7280; font-size: 0.75em;">({freq:.2f})</span></div>'
        for concept, freq in concepts
    )
    return (
        '<div style="display: flex; flex-wrap: nowrap; gap: 8px; padding: 12px; '
        'background-color: #f8f9fa; border-radius: 8px; overflow-x: auto; '
        'margin-bottom: 15px; white-space: nowrap;">'
        f'{chips}</div>'
    )


def _render_discourse_document(analysis: dict, t: dict, doc_number: int):
    """Render one document's key concepts, concept graph, and legend.

    Expects the analysis record to carry ``key_concepts{doc_number}`` (list of
    (concept, freq) pairs) and ``graph{doc_number}`` (raw image bytes).
    Must be called inside the Streamlit column for that document.
    """
    st.subheader(t.get(f'doc{doc_number}_title', f'Documento {doc_number}'))
    st.markdown(t.get('key_concepts', 'Conceptos Clave'))

    concepts = analysis.get(f'key_concepts{doc_number}')
    if concepts:
        st.markdown(_concepts_html(concepts), unsafe_allow_html=True)
    else:
        st.info(t.get('no_concepts', 'No hay conceptos disponibles'))

    graph_key = f'graph{doc_number}'
    if graph_key in analysis:
        try:
            graph = analysis[graph_key]
            # Only raw bytes can be passed to st.image; anything else is a bad record.
            if isinstance(graph, bytes):
                st.image(graph, use_container_width=True)
            else:
                logger.warning(f"{graph_key} no es bytes: {type(graph)}")
                st.warning(t.get('graph_not_available', 'Gráfico no disponible'))
        except Exception as e:
            logger.error(f"Error mostrando {graph_key}: {str(e)}")
            st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
    else:
        st.info(t.get('no_visualization', 'No hay visualización disponible'))

    # Legend explaining how to read the concept graph.
    st.markdown("**📊 Interpretación del grafo:**")
    st.markdown("""
    - 🔀 Las flechas indican la dirección de la relación entre conceptos
    - 🎨 Los colores más intensos indican conceptos más centrales en el texto
    - ⭕ El tamaño de los nodos representa la frecuencia del concepto
    - ↔️ El grosor de las líneas indica la fuerza de la conexión
    """)


def display_discourse_activities(username: str, t: dict):
    """Show discourse analyses (labelled 'Análisis comparado de textos' in the UI).

    Each record is rendered as an expander with two side-by-side columns,
    one per compared document. The per-document rendering previously
    duplicated ~50 lines for each column; it is now shared via
    ``_render_discourse_document``.

    Args:
        username: Student identifier used to look up stored records.
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        logger.info(f"Recuperando análisis del discurso para {username}")
        analyses = get_student_discourse_analysis(username)

        if not analyses:
            logger.info("No se encontraron análisis del discurso")
            st.info(t.get('no_discourse_analyses', 'No hay análisis comparados de textos registrados'))
            return

        logger.info(f"Procesando {len(analyses)} análisis del discurso")
        for analysis in analyses:
            try:
                # Records without a timestamp cannot be labelled; skip them.
                if 'timestamp' not in analysis:
                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
                    continue

                # Timestamps are stored as ISO-8601 with a trailing 'Z'.
                timestamp = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                formatted_date = timestamp.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {formatted_date}", expanded=False):
                    col1, col2 = st.columns(2)

                    with col1:
                        _render_discourse_document(analysis, t, 1)

                    with col2:
                        _render_discourse_document(analysis, t, 2)

            except Exception as e:
                # A malformed record must not break the remaining ones.
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis del discurso: {str(e)}")
        st.error(t.get('error_discourse', 'Error al mostrar análisis comparado de textos'))
|
|
|
|
|
|
| |
|
|
def _render_concept_values(concepts, t: dict, doc_number: int):
    """Render one text's key-concepts value in a shape-tolerant way.

    Handles three shapes: a list of (concept, freq) pairs — accepted as
    2-element lists OR tuples — a flat list of concept strings, or any
    other value (shown via str()). Only the first 10 entries are shown.
    Errors are logged and surfaced with the per-document translation key.
    """
    try:
        if isinstance(concepts, list) and concepts:
            first = concepts[0]
            # A pair list renders as "concept (freq)" in italics.
            if isinstance(first, (list, tuple)) and len(first) == 2:
                concepts_text = ", ".join(f"{c[0]} ({c[1]})" for c in concepts[:10])
                st.markdown(f"*{concepts_text}*")
            else:
                st.markdown(", ".join(str(c) for c in concepts[:10]))
        else:
            # Unexpected shape: fall back to a plain string dump.
            st.write(str(concepts))
    except Exception as e:
        logger.error(f"Error mostrando key_concepts{doc_number}: {str(e)}")
        st.error(t.get(f'error_concepts{doc_number}',
                       f'Error mostrando conceptos del Texto {doc_number}'))


def display_discourse_comparison(analysis: dict, t: dict):
    """
    Show the key-concept comparison of a discourse analysis.

    Simplified horizontal format: both texts' concepts are rendered through
    the shared ``_render_concept_values`` helper instead of two duplicated
    branches. If Text 1 has no concepts, nothing is compared.
    """
    st.subheader(t.get('comparison_results', 'Resultados de la comparación'))

    # Without Text 1 concepts there is nothing to compare against.
    if not analysis.get('key_concepts1'):
        st.info(t.get('no_concepts', 'No hay conceptos disponibles para comparar'))
        return

    st.markdown(f"**{t.get('concepts_text_1', 'Conceptos Texto 1')}:**")
    _render_concept_values(analysis['key_concepts1'], t, 1)

    st.markdown(f"**{t.get('concepts_text_2', 'Conceptos Texto 2')}:**")
    if analysis.get('key_concepts2'):
        _render_concept_values(analysis['key_concepts2'], t, 2)
    else:
        st.info(t.get('no_concepts2', 'No hay conceptos disponibles para el Texto 2'))
|
|
|
|
| |
def display_chat_activities(username: str, t: dict):
    """
    Render the student's chat history with the virtual tutor, newest conversation first.
    """
    try:
        # Fetch up to the 50 most recent sidebar-chat conversations.
        chat_history = get_chat_history(
            username=username,
            analysis_type='sidebar',
            limit=50
        )

        if not chat_history:
            st.info(t.get('no_chat_history', 'No hay conversaciones registradas'))
            return

        # History comes oldest-first; reverse so recent chats appear on top.
        for chat in reversed(chat_history):
            try:
                # Timestamps are stored as ISO-8601 with a trailing 'Z'.
                parsed_ts = datetime.fromisoformat(chat['timestamp'].replace('Z', '+00:00'))
                date_label = parsed_ts.strftime("%d/%m/%Y %H:%M:%S")

                header = f"{t.get('chat_date', 'Fecha de conversación')}: {date_label}"
                with st.expander(header, expanded=False):
                    messages = chat.get('messages')
                    if messages:
                        for message in messages:
                            # Each message renders in its role's chat bubble.
                            with st.chat_message(message.get('role', 'unknown')):
                                st.markdown(message.get('content', ''))

                        st.divider()
                    else:
                        st.warning(t.get('invalid_chat_format', 'Formato de chat no válido'))

            except Exception as e:
                # A malformed conversation must not break the remaining ones.
                logger.error(f"Error mostrando conversación: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando historial del chat: {str(e)}")
        st.error(t.get('error_chat', 'Error al mostrar historial del chat'))
| |
| |
|
|