| |
| """ |
| CompI Phase 3 Final Dashboard Launcher |
| |
| Launch the complete Phase 3 integrated dashboard that combines ALL CompI features: |
| |
| Phase 3.A/3.B: True multimodal fusion with real processing |
| - Real audio transcription and analysis |
| - Actual data processing and visualization |
| - Sentiment analysis and emotion detection |
| - Live real-time data feeds (weather, news) |
| |
| Phase 3.C: Advanced references with role assignment |
| - Multi-image upload and URL support |
| - Style vs structure role assignment |
| - Live ControlNet previews (Canny/Depth) |
| - Hybrid CN+I2I generation modes |
| |
| Phase 3.D: Professional workflow management |
| - Gallery with advanced filtering |
| - Rating, tagging, and annotation system |
| - Preset save/load functionality |
| - Complete export bundles with metadata |
| |
| Phase 3.E: Performance management and model switching |
| - SD 1.5 ↔ SDXL model switching |
| - LoRA integration with scale control |
| - Performance optimizations (xFormers, attention slicing, VAE) |
| - VRAM monitoring and OOM auto-retry |
| - Optional latent upscaling |
| |
| Usage: |
| python run_phase3_final_dashboard.py |
| |
| or |
| |
| streamlit run src/ui/compi_phase3_final_dashboard.py --server.port 8506 |
| """ |
|
|
| import os |
| import sys |
| import subprocess |
| from pathlib import Path |
|
|
def check_dependencies():
    """Check that all required third-party packages are importable.

    Prints the available and missing packages and returns True when
    everything required is present, False otherwise (missing entries are
    reported by their pip install name, e.g. ``opencv-python``).

    Returns:
        bool: True if all required dependencies are importable.
    """
    print("📦 Checking dependencies...")

    # Map the *import* name (what __import__ needs) to the pip package name
    # and a human-readable label. This matters where the two differ:
    # Pillow installs as "pillow" but imports as "PIL", and OpenCV installs
    # as "opencv-python" but imports as "cv2". The previous implementation
    # tried __import__("pillow"), which always fails even when Pillow is
    # installed.
    required_packages = {
        "torch": ("torch", "PyTorch"),
        "diffusers": ("diffusers", "Diffusers"),
        "transformers": ("transformers", "Transformers"),
        "accelerate": ("accelerate", "Accelerate"),
        "streamlit": ("streamlit", "Streamlit"),
        "PIL": ("pillow", "Pillow (PIL)"),
        "numpy": ("numpy", "NumPy"),
        "pandas": ("pandas", "Pandas"),
        "librosa": ("librosa", "Librosa (audio processing)"),
        "matplotlib": ("matplotlib", "Matplotlib (plotting)"),
        "requests": ("requests", "Requests (HTTP)"),
        "feedparser": ("feedparser", "FeedParser (RSS feeds)"),
        "textblob": ("textblob", "TextBlob (sentiment analysis)"),
        "cv2": ("opencv-python", "OpenCV (image processing)"),
    }

    missing_packages = []
    available_packages = []

    for import_name, (pip_name, label) in required_packages.items():
        try:
            __import__(import_name)
            available_packages.append(label)
        except ImportError:
            # Report the pip name so the user can install it directly.
            missing_packages.append(pip_name)

    print(f"✅ Available: {', '.join(available_packages)}")

    if missing_packages:
        print(f"❌ Missing: {', '.join(missing_packages)}")
        return False

    return True
|
|
def check_optional_features():
    """Probe and report optional CompI capabilities.

    Each probe checks that a module (and optionally one of its attributes)
    can be resolved, printing a success or warning line. Purely
    informational — nothing is returned.
    """
    print("\n🔍 Checking optional features...")

    # (module, attribute-or-None, message if present, message if absent)
    probes = [
        ("whisper", None,
         "✅ Whisper available for audio transcription",
         "⚠️ Whisper not available (will be installed on first use)"),
        ("diffusers", "StableDiffusionXLPipeline",
         "✅ SDXL support available",
         "⚠️ SDXL not available (requires newer diffusers)"),
        ("diffusers", "StableDiffusionControlNetPipeline",
         "✅ ControlNet available",
         "⚠️ ControlNet not available"),
        ("diffusers", "StableDiffusionLatentUpscalePipeline",
         "✅ Latent Upscaler available",
         "⚠️ Latent Upscaler not available"),
        ("xformers", None,
         "✅ xFormers available for memory optimization",
         "⚠️ xFormers not available (optional performance boost)"),
    ]

    for module_name, attribute, ok_message, missing_message in probes:
        try:
            module = __import__(module_name)
            if attribute is not None:
                # diffusers resolves pipeline classes lazily; a missing
                # class surfaces as AttributeError here.
                getattr(module, attribute)
            print(ok_message)
        except (ImportError, AttributeError):
            print(missing_message)
|
|
def check_gpu_setup():
    """Inspect the CUDA environment and print sizing recommendations.

    Returns:
        bool: True when a CUDA-capable GPU is usable, False when running
        in CPU mode or when PyTorch is not installed.
    """
    print("\n🔍 Checking GPU setup...")

    try:
        import torch
    except ImportError:
        print("❌ PyTorch not found")
        return False

    if not torch.cuda.is_available():
        print("⚠️ CUDA not available - will use CPU mode")
        print("💡 CPU mode is slower but still functional")
        return False

    device_count = torch.cuda.device_count()
    primary_name = torch.cuda.get_device_name(0)
    # total_memory is reported in bytes; convert to GiB for display.
    vram_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3)

    print(f"✅ CUDA available: {device_count} GPU(s)")
    print(f" Primary GPU: {primary_name}")
    print(f" VRAM: {vram_gb:.1f} GB")

    # Tiered guidance based on available VRAM.
    if vram_gb >= 12.0:
        print("✅ Excellent VRAM for all features including SDXL")
    elif vram_gb >= 8.0:
        print("✅ Good VRAM for SDXL and most features")
    elif vram_gb >= 6.0:
        print("✅ Sufficient VRAM for SD 1.5 and most features")
        print("⚠️ SDXL may require optimizations")
    elif vram_gb >= 4.0:
        print("✅ Minimum VRAM for SD 1.5")
        print("⚠️ Use aggressive optimizations for best performance")
    else:
        print("⚠️ Limited VRAM - consider CPU mode or cloud GPU")

    return True
|
|
def install_missing_dependencies():
    """Install the core dependency set via pip in the current interpreter.

    Returns:
        bool: True on successful installation, False if pip exits non-zero.
    """
    print("\n📦 Installing missing dependencies...")

    # Pip requirement specifiers for the full core stack.
    core_packages = [
        "torch", "torchvision", "torchaudio",
        "diffusers>=0.21.0", "transformers", "accelerate",
        "streamlit", "pillow", "numpy", "pandas",
        "librosa", "opencv-python", "matplotlib",
        "requests", "feedparser", "textblob"
    ]

    print("Installing core packages...")
    pip_command = [sys.executable, "-m", "pip", "install", *core_packages]
    try:
        subprocess.check_call(pip_command)
    except subprocess.CalledProcessError as err:
        print(f"❌ Installation failed: {err}")
        return False

    print("✅ Core dependencies installed")

    # xFormers is deliberately left out: its wheels frequently conflict
    # with the installed PyTorch build.
    print("⚠️ Skipping xFormers installation (compatibility issues with current PyTorch version)")

    return True
|
|
def main():
    """Launch Phase 3 Final Dashboard.

    Performs preflight checks (UI file presence, dependencies, GPU,
    optional features), prints usage guidance, then launches the
    Streamlit dashboard on port 8506.

    Returns:
        int: process exit code (0 on success/clean stop, 1 on error).
    """

    print("🧪 CompI Phase 3 Final Dashboard")
    print("=" * 80)
    print()
    print("🎯 Complete Phase 3 Integration (3.A → 3.E):")
    print(" • 🧩 Multimodal Inputs: Text, Audio, Data, Emotion, Real-time")
    print(" • 🖼️ Advanced References: Role assignment, ControlNet, live previews")
    print(" • ⚙️ Model & Performance: SD 1.5/SDXL, LoRA, VRAM monitoring")
    print(" • 🎛️ Intelligent Generation: Hybrid modes, OOM recovery")
    print(" • 🖼️ Professional Gallery: Filtering, rating, annotation")
    print(" • 💾 Preset Management: Save/load configurations")
    print(" • 📦 Export System: Complete bundles with metadata")
    print()

    # The dashboard path is relative, so this script must run from the
    # project root; bail out early with guidance if the file is absent.
    ui_file = Path("src/ui/compi_phase3_final_dashboard.py")
    if not ui_file.exists():
        print(f"❌ Error: {ui_file} not found!")
        print("Make sure you're running this from the project root directory.")
        return 1

    # Required packages: offer an interactive install if any are missing.
    if not check_dependencies():
        print("\n❌ Missing dependencies detected.")
        install = input("Install missing dependencies? (y/n): ").lower().strip()

        if install == 'y':
            if not install_missing_dependencies():
                print("❌ Failed to install dependencies")
                return 1
        else:
            print("❌ Cannot proceed without required dependencies")
            return 1

    # GPU availability only affects which tips are shown below; the
    # dashboard itself works in CPU mode too.
    has_gpu = check_gpu_setup()

    # Optional features are informational only — never fatal.
    check_optional_features()

    print()
    print("🚀 Launching Phase 3 Final Dashboard...")
    print("📍 Access at: http://localhost:8506")
    print()

    if has_gpu:
        print("💡 GPU Tips:")
        print(" • Monitor VRAM usage in the top metrics bar")
        print(" • Use performance optimizations in Model & Performance tab")
        print(" • Enable OOM auto-retry for reliability")
        print(" • Try SDXL for higher quality (requires 8+ GB VRAM)")
    else:
        print("💡 CPU Tips:")
        print(" • Generation will be slower but still functional")
        print(" • Use smaller image sizes (512x512 or less)")
        print(" • Reduce inference steps for faster generation")
        print(" • Stick to SD 1.5 model for best performance")

    print()
    print("🎨 Getting Started:")
    print(" 1. 🧩 Configure multimodal inputs (audio, data, emotion, real-time)")
    print(" 2. 🖼️ Upload reference images and assign roles (style vs structure)")
    print(" 3. ⚙️ Choose model and optimize performance settings")
    print(" 4. 🎛️ Generate with intelligent fusion of all inputs")
    print(" 5. 🖼️ Review results in gallery and add annotations")
    print(" 6. 💾 Save presets for reuse")
    print(" 7. 📦 Export complete bundles with metadata")
    print()

    # Launch Streamlit as a subprocess; this call blocks until the
    # dashboard process exits (or the user hits Ctrl+C).
    try:
        cmd = [
            sys.executable, "-m", "streamlit", "run",
            str(ui_file),
            "--server.port", "8506",  # fixed port matching the docstring/README
            "--server.headless", "true",  # don't auto-open a browser
            "--browser.gatherUsageStats", "false"
        ]

        subprocess.run(cmd)

    except KeyboardInterrupt:
        # Ctrl+C is the normal way to stop the dashboard — treat as success.
        print("\n👋 Phase 3 Final Dashboard stopped by user")
        return 0
    except Exception as e:
        print(f"❌ Error launching Streamlit: {e}")
        return 1

    return 0
|
|
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
|