# app.py — Streamlit tongue image analysis app
# (Hugging Face Space by Archit0030; commit f2b9556, verified)
# import subprocess
# import sys
# # Ensure required libraries are installed
# packages = ["torch", "transformers", "peft", "streamlit", "sentencepiece"]
# for package in packages:
# subprocess.run([sys.executable, "-m", "pip", "install", package])
# import torch
# from transformers import BartTokenizer, T5Tokenizer
# from peft import PeftModel
# from transformers import BartForConditionalGeneration, T5ForConditionalGeneration
# import streamlit as st
# # 1. Load model and tokenizer
# model_path = 'finetuned_final_t5'
# tokenizer = T5Tokenizer.from_pretrained(model_path)
# base_model = T5ForConditionalGeneration.from_pretrained('finetuned_final_t5')
# model = PeftModel.from_pretrained(base_model, model_path)
# model = model.merge_and_unload() # Merge LoRA adapters
# # 2. Set up device
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = model.to(device)
# model.eval()
# # 3. Prediction function
# def predict_actions(instruction):
# # Tokenize input
# inputs = tokenizer(
# instruction,
# max_length=128,
# truncation=True,
# padding="max_length",
# return_tensors="pt"
# ).to(device)
# # Generate actions
# with torch.no_grad():
# outputs = model.generate(
# input_ids=inputs.input_ids,
# attention_mask=inputs.attention_mask,
# max_length=64,
# )
# decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
# decoded = decoded.lower() # Force lowercase
# return [action.strip() for action in decoded.split() if action.strip()]
# # Streamlit interface
# st.title("Robotic Action Predictor")
# # Input text box
# instruction = st.text_input("Enter your instruction:", "")
# # Predict button
# if st.button("Predict Actions"):
# if instruction:
# try:
# actions = predict_actions(instruction)
# st.subheader("Predicted Actions:")
# st.write(", ".join(actions))
# except Exception as e:
# st.error(f"Error: {str(e)}")
# else:
# st.warning("Please enter a valid instruction")
import subprocess
import sys
import importlib.util

# 📦 Ensure required libraries are installed
packages = [
    "streamlit", "ultralytics", "torch", "opencv-python",
    "numpy", "Pillow"
]
# PyPI distribution name -> importable module name, so we can detect
# already-installed packages and avoid re-running pip on every app start
# (the original shelled out to pip unconditionally, slowing each launch).
_import_names = {"opencv-python": "cv2", "Pillow": "PIL"}
for package in packages:
    module_name = _import_names.get(package, package)
    if importlib.util.find_spec(module_name) is None:
        subprocess.run([sys.executable, "-m", "pip", "install", package])
import streamlit as st
from ultralytics import YOLO
import torch
import cv2
import numpy as np
import os
from PIL import Image
# Load YOLO models once at startup
@st.cache_resource
def load_models():
    """Load the three YOLO detectors once and reuse them across Streamlit reruns."""
    weight_files = {
        "crack": 'best_crack.pt',
        "fungi": 'best_fungi.pt',
        "ncf": 'best_norm_crem_fiss.pt',
    }
    return {task: YOLO(path) for task, path in weight_files.items()}
# Instantiate the cached detectors once at import time.
models = load_models()

# Severity weight per fungi class name; used for the weighted-average
# fungi score below (unknown class names fall back to weight 0).
fungi_class_weights = {
    "no_fungi": 0,
    "mid_fungi": 5,
    "fungi": 10
}
st.title("Tongue Image Analysis")

# --- INPUT SELECTION ---
# Either capture a photo with the webcam or upload a file; both widgets
# yield a file-like object (or None while empty) in img_data.
use_camera = st.checkbox("📷 Capture Image with Camera")
if use_camera:
    img_data = st.camera_input("Take a photo")
else:
    img_data = st.file_uploader("Upload an image", type=['png','jpg','jpeg'])
# Only proceed if we have an image
if img_data:
    # Load PIL image; force RGB so the COLOR_RGB2BGR conversion below cannot
    # fail on RGBA (PNG with alpha) or grayscale input.
    image = Image.open(img_data).convert("RGB")
    st.image(image, caption="Input Image", use_column_width=True)

    # Save temporarily for YOLO (overwrites on each run)
    arr = np.array(image)
    temp_path = "temp_input.png"
    cv2.imwrite(temp_path, cv2.cvtColor(arr, cv2.COLOR_RGB2BGR))

    def _class_index(names, target):
        """Return the class id for *target* in a YOLO `model.names` mapping.

        Ultralytics exposes `names` either as a dict {id: name} or as a list
        of names; the original code assumed a list, so the dict form silently
        yielded a zero crack score. Returns None when the name is absent.
        """
        if isinstance(names, dict):
            for idx, name in names.items():
                if name == target:
                    return idx
            return None
        return names.index(target) if target in names else None

    try:
        # --- 1) Crack Detection ---
        st.subheader("🔍 Crack Detection")
        crack_model = models["crack"]
        crack_names = crack_model.names
        results = crack_model.predict(source=temp_path, save=False, stream=True,
                                      conf=0.001, iou=0.99, device='cpu')
        for r in results:
            # Keep the best confidence observed per class.
            scores = torch.zeros(len(crack_names))
            for box in r.boxes:
                c = int(box.cls[0]); s = float(box.conf[0])
                scores[c] = max(scores[c], s)
            crack_idx = _class_index(crack_names, "crack")
            crack_c = float(scores[crack_idx]) if crack_idx is not None else 0.0
            non_c = 0.0
            # handle naming variants of the negative class
            for name in ["non_crack", "non crack"]:
                idx = _class_index(crack_names, name)
                if idx is not None:
                    non_c = max(non_c, float(scores[idx]))
            for idx, sc in enumerate(scores):
                st.write(f"➤ {crack_names[idx]}: {sc:.3f}")
            # Scale the crack share of total confidence to 0–10.
            total = (crack_c * 10)/(crack_c + non_c) if (crack_c+non_c) > 0 else 0.0
            st.success(f"✅ Total Crack Score: {total:.2f}")

        # --- 2) Fungi Detection ---
        st.subheader("🧪 Fungi Detection")
        fungi_model = models["fungi"]
        fungi_names = fungi_model.names
        results = fungi_model.predict(source=temp_path, save=False, stream=True,
                                      conf=0.001, iou=0.99, device='cpu')
        for r in results:
            # Accumulate confidence per class (multiple boxes add up).
            sums = torch.zeros(len(fungi_names))
            for box in r.boxes:
                c = int(box.cls[0]); s = float(box.conf[0])
                sums[c] += s
            wsum = 0.0; tsum = 0.0
            for idx, total_conf in enumerate(sums):
                name = fungi_names[idx]
                st.write(f"➤ {name}: total_conf = {total_conf:.3f}")
                wsum += total_conf * fungi_class_weights.get(name, 0)
                tsum += total_conf
            # Confidence-weighted average of the class severity weights.
            avg = wsum/tsum if tsum > 0 else 0.0
            st.success(f"🧪 Weighted Average Fungi Score: {avg:.2f}")

        # --- 3) Normal/Crescent/Fissure ---
        st.subheader("📊 Normal / Crescent / Fissure Detection")
        ncf_model = models["ncf"]
        ncf_names = ncf_model.names
        results = ncf_model.predict(source=temp_path, save=False, stream=True,
                                    conf=0.001, iou=0.99, device='cpu')
        # Report only the single highest-confidence detection.
        best_conf, best_cls = 0.0, None
        for r in results:
            for box in r.boxes:
                c = int(box.cls[0]); s = float(box.conf[0])
                if s > best_conf:
                    best_conf, best_cls = s, ncf_names[c]
        if best_cls:
            st.success(f"✅ Predicted Class: {best_cls} ({best_conf:.2f})")
        else:
            st.warning("⚠️ No class detected.")
    finally:
        # Clean up the temp file even if a detection step raises.
        os.remove(temp_path)
else:
    st.info("Please upload an image or check “Capture Image with Camera” to take one.")