| import streamlit as st |
| from google import genai |
| from google.genai import types |
| from PyPDF2 import PdfReader |
| import os |
|
|
| |
# Read the Gemini API key from the environment (configured as a secret on
# the hosting platform, e.g. Hugging Face Spaces).
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

# Without a key the app cannot talk to the API at all, so halt rendering
# right away with a clear message instead of failing later per-request.
if not GEMINI_API_KEY:
    st.error("❌ No se encontró la API Key. Agrega 'GEMINI_API_KEY' como secreto en Hugging Face.")
    st.stop()
|
|
| |
def extract_text_from_pdf(pdf_file):
    """Extract the text of every page of an uploaded PDF.

    Args:
        pdf_file: A binary file-like object containing the PDF
            (e.g. the object returned by ``st.file_uploader``).

    Returns:
        str: The concatenated text of all pages, one newline appended per
        page, or a Spanish error message string if the PDF could not be
        parsed (the caller displays whatever string comes back).
    """
    try:
        pdf_reader = PdfReader(pdf_file)
        text = ""
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no text layer
            # (e.g. scanned images). Guard it: concatenating None would
            # raise TypeError and turn a readable PDF into an "error"
            # string for the caller.
            page_text = page.extract_text()
            if page_text:
                text += page_text + "\n"
        return text
    except Exception as e:
        return f"Se produjo un error al leer el PDF: {e}"
|
|
| |
def generate_chat_response(user_input):
    """Send a free-form user message to Gemini and return the full reply.

    Streams the model output and accumulates it into a single string.

    Args:
        user_input: The raw text typed by the user.

    Returns:
        str: The complete model response, or a Spanish error message
        string if the API call failed.
    """
    try:
        client = genai.Client(api_key=GEMINI_API_KEY)
        contents = [
            types.Content(
                role="user",
                parts=[types.Part.from_text(text=user_input)],
            )
        ]
        config = types.GenerateContentConfig(
            temperature=0.7,
            top_p=0.95,
            top_k=64,
            max_output_tokens=65536,
            response_mime_type="text/plain",
        )
        response_text = ""
        for chunk in client.models.generate_content_stream(
            model="gemini-2.0-flash-thinking-exp-01-21",
            contents=contents,
            config=config,
        ):
            # Some streamed chunks (finish/metadata chunks) carry no text,
            # so chunk.text can be None. Concatenating None would raise
            # TypeError, get swallowed by the broad except below, and
            # replace the whole answer with an error string.
            if chunk.text:
                response_text += chunk.text
        return response_text
    except Exception as e:
        return f"Se produjo un error: {e}"
|
|
| |
def generate_pdf_response(context, question):
    """Ask Gemini a question grounded in text extracted from a PDF.

    Builds a single prompt embedding the PDF text as context, streams the
    model output, and accumulates it into one string.

    Args:
        context: Text previously extracted from the uploaded PDF.
        question: The user's question about that text.

    Returns:
        str: The complete model response, or a Spanish error message
        string if the API call failed.
    """
    try:
        client = genai.Client(api_key=GEMINI_API_KEY)
        contents = [
            types.Content(
                role="user",
                parts=[types.Part.from_text(text=f"Contexto: {context}\n\nPregunta: {question}")],
            )
        ]
        config = types.GenerateContentConfig(
            temperature=0.7,
            top_p=0.95,
            top_k=64,
            max_output_tokens=65536,
            response_mime_type="text/plain",
        )
        response_text = ""
        for chunk in client.models.generate_content_stream(
            model="gemini-2.0-flash-thinking-exp-01-21",
            contents=contents,
            config=config,
        ):
            # chunk.text can be None on metadata/finish chunks; skip those
            # so concatenation never raises (consistent with
            # generate_chat_response).
            if chunk.text:
                response_text += chunk.text
        return response_text
    except Exception as e:
        return f"Se produjo un error: {e}"
|
|
| |
# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.title("🧠 Gemini LLM App")

# Mode selector: free-form chat vs. question-answering over an uploaded PDF.
mode = st.radio(
    "¿Qué deseas hacer?",
    ("💬 Hablar con el chat", "📄 Subir y preguntar sobre un PDF"),
)

if mode == "💬 Hablar con el chat":
    # --- Chat mode -------------------------------------------------------
    message = st.text_area("Introduce tu mensaje:", placeholder="Escribe algo aquí...")
    if st.button("Obtener Respuesta"):
        if not message.strip():
            # Guard clause: reject empty/whitespace-only input.
            st.error("Por favor, escribe algo antes de enviar.")
        else:
            with st.spinner("Pensando..."):
                answer = generate_chat_response(message)
                st.success("¡Respuesta generada!")
                st.write(answer)

elif mode == "📄 Subir y preguntar sobre un PDF":
    # --- PDF question-answering mode ------------------------------------
    pdf_upload = st.file_uploader("Sube un archivo PDF", type="pdf")
    if not pdf_upload:
        st.info("Por favor, sube un archivo PDF.")
    else:
        with st.spinner("Extrayendo texto del PDF..."):
            document_text = extract_text_from_pdf(pdf_upload)

        if not document_text:
            st.error("No se pudo extraer texto. Intenta con otro archivo.")
        else:
            st.success("Texto extraído correctamente.")
            # Show only the first 1000 characters as a preview.
            st.text_area("Contenido del PDF (vista previa):", document_text[:1000], height=200)
            question = st.text_area("Haz una pregunta sobre el contenido del PDF:", placeholder="Ej. ¿De qué trata este documento?")
            if st.button("Obtener Respuesta"):
                if question.strip():
                    with st.spinner("Generando respuesta..."):
                        answer = generate_pdf_response(document_text, question)
                        st.success("¡Respuesta generada!")
                        st.write(answer)
                else:
                    st.error("Escribe una pregunta para continuar.")
|
|