Spaces:
Sleeping
Sleeping
| import os | |
| import requests | |
| import random | |
| import io | |
| from datetime import datetime | |
| from typing import Optional | |
| from PIL import Image | |
# Folder where generated images are stored; created up front so saves never fail.
OUTPUT_DIR = "generated_images"
os.makedirs(OUTPUT_DIR, exist_ok=True)
def generate_image_from_prompt(
    prompt: str,
    negative_prompt: str = "",
    model_name: str = "ignored",  # kept for caller compatibility; not used
    seed: Optional[int] = None,
) -> tuple[Optional[str], str]:
    """Generate an image from a text prompt via the HuggingFace Inference API.

    Tries a primary model first and falls back to a backup model on any
    HTTP error, network exception, or unreadable response body.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Optional text describing what to avoid in the image.
        model_name: Ignored; the models attempted are hard-coded below.
        seed: Used only in the output filename (NOTE(review): it is not sent
            to the API, so generations are not reproducible — confirm whether
            that is intended). Random 0-999 if None.

    Returns:
        ``(file_path, success_message)`` on success, or
        ``(None, error_message)`` if credentials are missing or every model fails.
    """
    # 1. Credential validation — HF_TOKEN is injected by the HF Spaces environment.
    api_key = os.getenv("HF_TOKEN")
    if not api_key:
        return None, "❌ Error Crítico: No existe HF_TOKEN en el entorno."
    api_key = api_key.strip()  # defensive cleanup of stray whitespace

    # 2. Models to try (primary and fallback), both served by HF Inference.
    primary_model = "black-forest-labs/FLUX.1-schnell"
    backup_model = "stabilityai/stable-diffusion-2-1"
    models_to_try = [primary_model, backup_model]
    last_error = ""

    # Pick the filename seed once, up front, so it is stable across attempts.
    if seed is None:
        seed = random.randint(0, 999)

    # 3. Attempt loop over candidate models.
    for model in models_to_try:
        try:
            print(f"🔄 Intentando generar con modelo: {model}...")
            # FIX: the HF router requires the provider segment ("hf-inference");
            # the previous bare /models/{model} path is not a valid router route.
            api_url = f"https://router.huggingface.co/hf-inference/models/{model}"
            headers = {
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            }
            # FIX: generation options must go under "parameters" — a top-level
            # "negative_prompt" key is ignored by the Inference API.
            payload: dict = {"inputs": prompt}
            if negative_prompt:
                payload["parameters"] = {"negative_prompt": negative_prompt}

            response = requests.post(
                api_url,
                headers=headers,
                json=payload,
                timeout=60,  # image generation can be slow; allow a long wait
            )
            # Any non-200 status: record the error and fall through to the next model.
            if response.status_code != 200:
                error_detail = response.text
                print(f"⚠️ Fallo con {model}: {error_detail}")
                last_error = f"Error {response.status_code} en {model}: {error_detail}"
                continue

            # 200 OK: the API returns the raw image bytes in the response body.
            image_bytes = response.content
            try:
                # Verify the payload is a decodable image before saving it.
                image = Image.open(io.BytesIO(image_bytes))
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                filename = f"sofia_{timestamp}_{seed}.png"
                file_path = os.path.join(OUTPUT_DIR, filename)
                image.save(file_path)
                return file_path, f"✅ ÉXITO: Imagen creada con {model}"
            except Exception as img_error:
                last_error = f"Error al procesar imagen de {model}: {str(img_error)}"
                continue
        except Exception as e:
            # Network failures, timeouts, etc. — try the next model.
            last_error = f"Excepción técnica con {model}: {str(e)}"
            continue

    # All models failed; surface the most recent error to the caller.
    return None, f"❌ ERROR FATAL: Fallaron todos los intentos.\nÚltimo error: {last_error}"