Archit0030 commited on
Commit
f2b9556
·
verified ·
1 Parent(s): b59fa90

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +179 -57
app.py CHANGED
@@ -1,66 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import subprocess
2
  import sys
3
 
4
- # Ensure required libraries are installed
5
- packages = ["torch", "transformers", "peft", "streamlit", "sentencepiece"]
 
 
 
6
  for package in packages:
7
  subprocess.run([sys.executable, "-m", "pip", "install", package])
8
 
9
- import torch
10
- from transformers import BartTokenizer, T5Tokenizer
11
- from peft import PeftModel
12
- from transformers import BartForConditionalGeneration, T5ForConditionalGeneration
13
  import streamlit as st
 
 
 
 
 
 
14
 
15
- # 1. Load model and tokenizer
16
- model_path = 'finetuned_final_t5'
17
- tokenizer = T5Tokenizer.from_pretrained(model_path)
18
- base_model = T5ForConditionalGeneration.from_pretrained('finetuned_final_t5')
19
- model = PeftModel.from_pretrained(base_model, model_path)
20
- model = model.merge_and_unload() # Merge LoRA adapters
21
-
22
- # 2. Set up device
23
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
- model = model.to(device)
25
- model.eval()
26
-
27
- # 3. Prediction function
28
- def predict_actions(instruction):
29
- # Tokenize input
30
- inputs = tokenizer(
31
- instruction,
32
- max_length=128,
33
- truncation=True,
34
- padding="max_length",
35
- return_tensors="pt"
36
- ).to(device)
37
-
38
- # Generate actions
39
- with torch.no_grad():
40
- outputs = model.generate(
41
- input_ids=inputs.input_ids,
42
- attention_mask=inputs.attention_mask,
43
- max_length=64,
44
- )
45
-
46
- decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
47
- decoded = decoded.lower() # Force lowercase
48
- return [action.strip() for action in decoded.split() if action.strip()]
49
-
50
- # Streamlit interface
51
- st.title("Robotic Action Predictor")
52
-
53
- # Input text box
54
- instruction = st.text_input("Enter your instruction:", "")
55
-
56
- # Predict button
57
- if st.button("Predict Actions"):
58
- if instruction:
59
- try:
60
- actions = predict_actions(instruction)
61
- st.subheader("Predicted Actions:")
62
- st.write(", ".join(actions))
63
- except Exception as e:
64
- st.error(f"Error: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  else:
66
- st.warning("Please enter a valid instruction")
 
 
 
 
 
 
 
1
+ # import subprocess
2
+ # import sys
3
+
4
+ # # Ensure required libraries are installed
5
+ # packages = ["torch", "transformers", "peft", "streamlit", "sentencepiece"]
6
+ # for package in packages:
7
+ # subprocess.run([sys.executable, "-m", "pip", "install", package])
8
+
9
+ # import torch
10
+ # from transformers import BartTokenizer, T5Tokenizer
11
+ # from peft import PeftModel
12
+ # from transformers import BartForConditionalGeneration, T5ForConditionalGeneration
13
+ # import streamlit as st
14
+
15
+ # # 1. Load model and tokenizer
16
+ # model_path = 'finetuned_final_t5'
17
+ # tokenizer = T5Tokenizer.from_pretrained(model_path)
18
+ # base_model = T5ForConditionalGeneration.from_pretrained('finetuned_final_t5')
19
+ # model = PeftModel.from_pretrained(base_model, model_path)
20
+ # model = model.merge_and_unload() # Merge LoRA adapters
21
+
22
+ # # 2. Set up device
23
+ # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
+ # model = model.to(device)
25
+ # model.eval()
26
+
27
+ # # 3. Prediction function
28
+ # def predict_actions(instruction):
29
+ # # Tokenize input
30
+ # inputs = tokenizer(
31
+ # instruction,
32
+ # max_length=128,
33
+ # truncation=True,
34
+ # padding="max_length",
35
+ # return_tensors="pt"
36
+ # ).to(device)
37
+
38
+ # # Generate actions
39
+ # with torch.no_grad():
40
+ # outputs = model.generate(
41
+ # input_ids=inputs.input_ids,
42
+ # attention_mask=inputs.attention_mask,
43
+ # max_length=64,
44
+ # )
45
+
46
+ # decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
47
+ # decoded = decoded.lower() # Force lowercase
48
+ # return [action.strip() for action in decoded.split() if action.strip()]
49
+
50
+ # # Streamlit interface
51
+ # st.title("Robotic Action Predictor")
52
+
53
+ # # Input text box
54
+ # instruction = st.text_input("Enter your instruction:", "")
55
+
56
+ # # Predict button
57
+ # if st.button("Predict Actions"):
58
+ # if instruction:
59
+ # try:
60
+ # actions = predict_actions(instruction)
61
+ # st.subheader("Predicted Actions:")
62
+ # st.write(", ".join(actions))
63
+ # except Exception as e:
64
+ # st.error(f"Error: {str(e)}")
65
+ # else:
66
+ # st.warning("Please enter a valid instruction")
67
+
68
  import subprocess
69
  import sys
70
 
71
# 📦 Ensure required libraries are installed.
# Map pip distribution name -> importable module name so we can skip
# packages that are already present. The original looped `pip install`
# over every package unconditionally, which re-runs pip on every
# Streamlit rerun (the whole script re-executes on each interaction).
import importlib.util

packages = {
    "streamlit": "streamlit",
    "ultralytics": "ultralytics",
    "torch": "torch",
    "opencv-python": "cv2",   # pip name differs from import name
    "numpy": "numpy",
    "Pillow": "PIL",          # pip name differs from import name
}
for package, module_name in packages.items():
    # find_spec returns None when the module is not importable.
    if importlib.util.find_spec(module_name) is None:
        subprocess.run([sys.executable, "-m", "pip", "install", package])
78
 
 
 
 
 
79
  import streamlit as st
80
+ from ultralytics import YOLO
81
+ import torch
82
+ import cv2
83
+ import numpy as np
84
+ import os
85
+ from PIL import Image
86
 
87
# Load YOLO models once at startup; st.cache_resource keeps the loaded
# models alive across Streamlit reruns instead of reloading the weights
# on every interaction.
@st.cache_resource
def load_models():
    """Load the three detection models and return them keyed by task name."""
    weight_files = {
        "crack": 'best_crack.pt',
        "fungi": 'best_fungi.pt',
        "ncf": 'best_norm_crem_fiss.pt',
    }
    return {task: YOLO(path) for task, path in weight_files.items()}
95
+
96
models = load_models()

# Per-class weights used for the fungi weighted-average score below;
# classes absent from this table contribute weight 0.
fungi_class_weights = {
    "no_fungi": 0,
    "mid_fungi": 5,
    "fungi": 10
}

st.title("Tongue Image Analysis")

# --- INPUT SELECTION ---
# The user either takes a photo with the camera widget or uploads a file;
# both widgets yield a file-like object (or None while empty).
use_camera = st.checkbox("📷 Capture Image with Camera")
img_data = (
    st.camera_input("Take a photo")
    if use_camera
    else st.file_uploader("Upload an image", type=['png','jpg','jpeg'])
)
113
+
114
# Only proceed if we have an image
if img_data:
    # Load PIL image and echo it back to the user.
    # NOTE(review): use_column_width is deprecated in newer Streamlit
    # (use_container_width replaces it) — kept for compatibility.
    image = Image.open(img_data)
    st.image(image, caption="Input Image", use_column_width=True)

    # Save temporarily for YOLO (overwrites on each run).
    # NOTE(review): a fixed filename clashes if two sessions run at once —
    # consider tempfile.NamedTemporaryFile if multi-user use matters.
    arr = np.array(image)
    temp_path = "temp_input.png"
    # PIL gives RGB; OpenCV expects BGR on disk-write.
    cv2.imwrite(temp_path, cv2.cvtColor(arr, cv2.COLOR_RGB2BGR))

    # --- 1) Crack Detection ---
    st.subheader("🔍 Crack Detection")
    crack_model = models["crack"]
    # Ultralytics `model.names` is a dict {class_index: class_name}.
    # BUG FIX: the original used list-style `"crack" in crack_names` and
    # `crack_names.index(...)` on this dict — membership tested the int
    # keys, so "crack" was never found and the crack score was always 0.
    # Build an explicit name -> index map instead.
    crack_names = crack_model.names
    crack_idx = {name: idx for idx, name in crack_names.items()}
    results = crack_model.predict(source=temp_path, save=False, stream=True,
                                  conf=0.001, iou=0.99, device='cpu')
    for r in results:
        # Keep the highest confidence seen for each class.
        scores = torch.zeros(len(crack_names))
        for box in r.boxes:
            c = int(box.cls[0]); s = float(box.conf[0])
            scores[c] = max(scores[c], s)
        crack_c = float(scores[crack_idx["crack"]]) if "crack" in crack_idx else 0.0
        non_c = 0.0
        # handle naming variants of the negative class
        for name in ["non_crack", "non crack"]:
            if name in crack_idx:
                non_c = max(non_c, float(scores[crack_idx[name]]))
        for idx, sc in enumerate(scores):
            st.write(f"➤ {crack_names[idx]}: {sc:.3f}")
        # Scale the crack confidence to 0-10 relative to the negative class.
        total = (crack_c * 10)/(crack_c + non_c) if (crack_c+non_c)>0 else 0.0
        st.success(f"✅ Total Crack Score: {total:.2f}")

    # --- 2) Fungi Detection ---
    st.subheader("🧪 Fungi Detection")
    fungi_model = models["fungi"]
    fungi_names = fungi_model.names  # dict {class_index: class_name}
    results = fungi_model.predict(source=temp_path, save=False, stream=True,
                                  conf=0.001, iou=0.99, device='cpu')
    for r in results:
        # Sum confidences per class across all detected boxes.
        sums = torch.zeros(len(fungi_names))
        for box in r.boxes:
            c = int(box.cls[0]); s = float(box.conf[0])
            sums[c] += s
        # Confidence-weighted average of the per-class weights above.
        wsum = 0.0; tsum = 0.0
        for idx, total_conf in enumerate(sums):
            name = fungi_names[idx]
            st.write(f"➤ {name}: total_conf = {total_conf:.3f}")
            wsum += total_conf * fungi_class_weights.get(name, 0)
            tsum += total_conf
        avg = wsum/tsum if tsum>0 else 0.0
        st.success(f"🧪 Weighted Average Fungi Score: {avg:.2f}")

    # --- 3) Normal/Crescent/Fissure ---
    st.subheader("📊 Normal / Crescent / Fissure Detection")
    ncf_model = models["ncf"]
    ncf_names = ncf_model.names  # dict lookup by int index is fine here
    results = ncf_model.predict(source=temp_path, save=False, stream=True,
                                conf=0.001, iou=0.99, device='cpu')
    # Report only the single most confident class across all results.
    best_conf, best_cls = 0.0, None
    for r in results:
        for box in r.boxes:
            c = int(box.cls[0]); s = float(box.conf[0])
            if s > best_conf:
                best_conf, best_cls = s, ncf_names[c]
    if best_cls:
        st.success(f"✅ Predicted Class: {best_cls} ({best_conf:.2f})")
    else:
        st.warning("⚠️ No class detected.")

    # Clean up
    os.remove(temp_path)

else:
    st.info("Please upload an image or check “Capture Image with Camera” to take one.")