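"""Streamlit app: Agentic Predictive Maintenance on P&ID diagrams.

Pipeline: detect symbols and lines in an uploaded P&ID image, build a graph,
score nodes with a GNN for anomalies, generate agent actions, and answer
follow-up questions with a locally hosted DeepSeek model.
"""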
import json

import streamlit as st
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

from pipeline.detector import detect_symbols_and_lines
from pipeline.graph_builder import build_graph
from pipeline.gnn_model import run_gnn
from pipeline.agent import generate_agent_actions

# Release any cached GPU memory left over from a previous run (no-op on CPU).
torch.cuda.empty_cache()
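# The pipeline modules are assumed to behave as follows (inferred from how
# they are called below, not from their source):
#   detect_symbols_and_lines(image) -> (detections, annotations, class_names)
#   build_graph(...)                -> graph (apparently persisted for run_gnn)
#   run_gnn()                       -> (fig, feature_map, red_nodes,
#                                       central_node, scores, G)
#   generate_agent_actions(...)     -> list of recommended action strings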
st.set_page_config(layout="wide")
st.title("Agentic Predictive Maintenance (P&ID Graph + GNN)")
# ===== Initialize Session State =====
for key, default in {
    "G": None,
    "feature_map": {},
    "scores": {},
    "fig": None,
    "actions": [],
    "deepseek_responses": [],
}.items():
    if key not in st.session_state:
        st.session_state[key] = default
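# Streamlit re-executes this script from the top on every interaction, so the
# sections below re-render previously computed results from session state.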
# ===== Redisplay Previous Outputs =====
if st.session_state["fig"] is not None:
    st.subheader("Previous Graph Visualization")
    st.pyplot(st.session_state["fig"])

if st.session_state["actions"]:
    st.subheader("Previous Agent Actions")
    for action in st.session_state["actions"]:
        st.write(action)

if st.session_state["deepseek_responses"]:
    st.subheader("Previous DeepSeek Responses")
    for r in st.session_state["deepseek_responses"]:
        st.markdown(f"**You:** {r['query']}")
        st.markdown(f"**DeepSeek:** {r['answer']}")
# ===== Upload and Analyze Image =====
uploaded_file = st.file_uploader("Upload a P&ID Image", type=["png", "jpg", "jpeg"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="P&ID Diagram", use_container_width=True)

    if st.button("Run Detection and Analysis"):
        detections, annotations, class_names = detect_symbols_and_lines(image)
        # build_graph's return value is unused here; it presumably persists the
        # graph somewhere run_gnn() can read it, since run_gnn() takes no arguments.
        build_graph(image, detections, annotations, class_names)

        st.info("Running anomaly detection on the graph...")
        fig, feature_map, red_nodes, central_node, scores, G = run_gnn()

        # Cache results so they survive Streamlit reruns.
        st.session_state.G = G
        st.session_state.feature_map = feature_map
        st.session_state.scores = scores
        st.session_state.fig = fig
        st.pyplot(fig)

        actions = generate_agent_actions(fig, feature_map, red_nodes, central_node, scores)
        st.session_state.actions = actions
        for action in actions:
            st.write(action)
# ===== DeepSeek Local Model Setup =====
@st.cache_resource(show_spinner=False)
def load_deepseek_model():
    """Load the model once per process; st.cache_resource avoids reloading on every rerun."""
    model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"  # lightweight option
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",  # place on GPU when available, otherwise CPU
        trust_remote_code=True,
    )
    return model, tokenizer
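# Rough memory estimate: 1.3B parameters at fp16 is about 2.6 GB of weights,
# plus activation and KV-cache overhead, so a small GPU (or CPU RAM) suffices.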
# ===== DeepSeek Q&A =====
st.subheader("Ask Questions About the Graph (DeepSeek Local)")
user_query = st.chat_input("Ask a question about the graph...")

if user_query:
    G = st.session_state.get("G")
    feature_map = st.session_state.get("feature_map", {})
    scores = st.session_state.get("scores", {})

    if G is not None and feature_map and scores:
        # Serialize the graph into a compact JSON structure the LLM can reason over.
        graph_data = {
            "nodes": [
                {
                    "id": str(i),
                    "label": feature_map.get(i, f"Node {i}"),
                    "score": float(scores.get(i, 0.0)),
                }
                for i in G.nodes()
            ],
            "edges": [
                {"source": str(u), "target": str(v)}
                for u, v in G.edges()
            ],
        }
        prompt = (
            "You are an expert graph analyst. Analyze this P&ID graph and answer the question.\n\n"
            "### Graph Data:\n"
            f"{json.dumps(graph_data, indent=2)}\n\n"
            "### Question:\n"
            f"{user_query}\n\n"
            "### Answer:\n"
        )
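        # NOTE: json.dumps on a large graph can exceed the model's context
        # window; if that happens, consider listing only the top-scoring nodes.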
        try:
            with st.spinner("Thinking (via DeepSeek Local)..."):
                model, tokenizer = load_deepseek_model()
                inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=128,
                    temperature=0.7,
                    do_sample=True,
                )
                # Decode only the newly generated tokens; slicing the decoded
                # string by len(prompt) is fragile because tokenization does
                # not round-trip character-for-character.
                new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
                answer = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

                st.session_state.deepseek_responses.append({
                    "query": user_query,
                    "answer": answer,
                })
                st.markdown(f"**DeepSeek:** {answer}")
        except Exception as e:
            st.error(f"DeepSeek error: {e}")
            st.error("Ensure enough GPU memory (8GB+ recommended).")
    else:
        st.warning("Please analyze a diagram first to generate a graph.")