Dev Nagaich committed on
Commit
8794644
·
1 Parent(s): e0ee47c

Fix Error 403 in new config

Browse files
Files changed (4) hide show
  1. .streamlit/config.toml +10 -0
  2. Dockerfile +11 -7
  3. app.py +121 -141
  4. requirements_deploy.txt +15 -27
.streamlit/config.toml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ [server]
2
+ maxUploadSize = 500
3
+ headless = true
4
+ port = 7860
5
+ address = "0.0.0.0"
6
+
7
+ [browser]
8
+ gatherUsageStats = false
9
+ serverAddress = "0.0.0.0"
10
+ serverPort = 7860
Dockerfile CHANGED
@@ -17,7 +17,7 @@ RUN apt-get update && apt-get install -y \
17
  # Copy requirements first for better caching
18
  COPY requirements_deploy.txt .
19
 
20
- # Install Python dependencies with version constraints
21
  RUN pip install --no-cache-dir --upgrade pip && \
22
  pip install --no-cache-dir -r requirements_deploy.txt
23
 
@@ -35,7 +35,7 @@ RUN wget --no-check-certificate \
35
  https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt \
36
  -O segment-anything-2/checkpoints/sam2_hiera_small.pt
37
 
38
- # Download VREyeSAM fine-tuned weights using Python
39
  RUN pip install --no-cache-dir huggingface-hub && \
40
  python -c "from huggingface_hub import hf_hub_download; \
41
  hf_hub_download(repo_id='devnagaich/VREyeSAM', \
@@ -44,10 +44,13 @@ RUN pip install --no-cache-dir huggingface-hub && \
44
  local_dir_use_symlinks=False)"
45
 
46
  # Verify files were downloaded
47
- RUN ls -lh segment-anything-2/checkpoints/ && \
48
- test -f segment-anything-2/checkpoints/sam2_hiera_small.pt && \
49
- test -f segment-anything-2/checkpoints/VREyeSAM_uncertainity_best.torch && \
50
- echo "All checkpoints downloaded successfully!"
 
 
 
51
 
52
  # Copy application files
53
  COPY app.py .
@@ -60,9 +63,10 @@ ENV STREAMLIT_SERVER_PORT=7860
60
  ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
61
  ENV STREAMLIT_SERVER_HEADLESS=true
62
  ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
 
63
 
64
  # Health check
65
  HEALTHCHECK CMD curl --fail http://localhost:7860/_stcore/health || exit 1
66
 
67
  # Run the application
68
- CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
 
17
  # Copy requirements first for better caching
18
  COPY requirements_deploy.txt .
19
 
20
+ # Install Python dependencies
21
  RUN pip install --no-cache-dir --upgrade pip && \
22
  pip install --no-cache-dir -r requirements_deploy.txt
23
 
 
35
  https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt \
36
  -O segment-anything-2/checkpoints/sam2_hiera_small.pt
37
 
38
+ # Download VREyeSAM weights using Python
39
  RUN pip install --no-cache-dir huggingface-hub && \
40
  python -c "from huggingface_hub import hf_hub_download; \
41
  hf_hub_download(repo_id='devnagaich/VREyeSAM', \
 
44
  local_dir_use_symlinks=False)"
45
 
46
  # Verify files were downloaded
47
+ RUN ls -lh segment-anything-2/checkpoints/
48
+
49
+ # Create Streamlit config directory
50
+ RUN mkdir -p /root/.streamlit
51
+
52
+ # Copy Streamlit config to increase upload size
53
+ COPY .streamlit/config.toml /root/.streamlit/config.toml
54
 
55
  # Copy application files
56
  COPY app.py .
 
63
  ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
64
  ENV STREAMLIT_SERVER_HEADLESS=true
65
  ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
66
+ ENV STREAMLIT_SERVER_MAX_UPLOAD_SIZE=500
67
 
68
  # Health check
69
  HEALTHCHECK CMD curl --fail http://localhost:7860/_stcore/health || exit 1
70
 
71
  # Run the application
72
+ CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0", "--server.maxUploadSize=500"]
app.py CHANGED
@@ -6,7 +6,6 @@ from PIL import Image
6
  import io
7
  import sys
8
  import os
9
- import traceback
10
 
11
  # Add segment-anything-2 to path
12
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), "segment-anything-2"))
@@ -18,7 +17,8 @@ from sam2.sam2_image_predictor import SAM2ImagePredictor
18
  st.set_page_config(
19
  page_title="VREyeSAM - Non-frontal Iris Segmentation",
20
  page_icon="👁️",
21
- layout="wide"
 
22
  )
23
 
24
  # Custom CSS
@@ -47,31 +47,13 @@ st.markdown("""
47
  def load_model():
48
  """Load the VREyeSAM model"""
49
  try:
50
- # Path handling for both local and Docker environments
51
  model_cfg = "configs/sam2/sam2_hiera_s.yaml"
52
  sam2_checkpoint = "segment-anything-2/checkpoints/sam2_hiera_small.pt"
53
  fine_tuned_weights = "segment-anything-2/checkpoints/VREyeSAM_uncertainity_best.torch"
54
 
55
- # Verify files exist
56
- if not os.path.exists(sam2_checkpoint):
57
- st.error(f"โŒ SAM2 checkpoint not found at: {sam2_checkpoint}")
58
- st.info("Current directory: " + os.getcwd())
59
- st.info("Directory contents: " + str(os.listdir(".")))
60
- return None
61
-
62
- if not os.path.exists(fine_tuned_weights):
63
- st.error(f"โŒ VREyeSAM weights not found at: {fine_tuned_weights}")
64
- return None
65
-
66
- # Check file sizes
67
- sam2_size = os.path.getsize(sam2_checkpoint) / (1024 * 1024)
68
- vresam_size = os.path.getsize(fine_tuned_weights) / (1024 * 1024)
69
- st.info(f"๐Ÿ“ฆ SAM2 checkpoint: {sam2_size:.1f} MB")
70
- st.info(f"๐Ÿ“ฆ VREyeSAM weights: {vresam_size:.1f} MB")
71
-
72
  # Load model
73
  device = "cuda" if torch.cuda.is_available() else "cpu"
74
- st.info(f"๐Ÿ–ฅ๏ธ Loading model on: {device.upper()}")
75
 
76
  sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device)
77
  predictor = SAM2ImagePredictor(sam2_model)
@@ -79,9 +61,7 @@ def load_model():
79
 
80
  return predictor
81
  except Exception as e:
82
- st.error(f"โŒ Error loading model: {str(e)}")
83
- st.error("Full traceback:")
84
- st.code(traceback.format_exc())
85
  return None
86
 
87
  def read_and_resize_image(image):
@@ -193,10 +173,8 @@ def main():
193
  - Inconsistent lighting conditions
194
 
195
  **Model Performance:**
196
- - Precision: 0.751
197
  - Recall: 0.870
198
  - F1-Score: 0.806
199
- - Mean IoU: 0.647
200
  """)
201
 
202
  st.header("Settings")
@@ -214,7 +192,7 @@ def main():
214
 
215
  st.success("✅ Model loaded successfully!")
216
 
217
- # File uploader
218
  uploaded_file = st.file_uploader(
219
  "Upload an iris image (JPG, PNG, JPEG)",
220
  type=["jpg", "png", "jpeg"],
@@ -222,132 +200,134 @@ def main():
222
  )
223
 
224
  if uploaded_file is not None:
225
- # Display original image
226
- image = Image.open(uploaded_file)
227
-
228
- col1, col2 = st.columns(2)
229
-
230
- with col1:
231
- st.subheader("๐Ÿ“ท Original Image")
232
- st.image(image, use_container_width=True)
233
-
234
- # Process button
235
- if st.button("๐Ÿ” Segment Iris", type="primary"):
236
- with st.spinner("Segmenting iris..."):
237
- try:
238
- # Prepare image
239
- img_array = read_and_resize_image(image)
240
-
241
- # Perform segmentation
242
- binary_mask, prob_mask = segment_iris(predictor, img_array)
243
-
244
- # Extract iris strip
245
- iris_strip = extract_iris_strip(img_array, binary_mask) if show_iris_strip else None
246
-
247
- with col2:
248
- st.subheader("๐ŸŽฏ Binary Mask")
249
- binary_mask_img = (binary_mask * 255).astype(np.uint8)
250
- st.image(binary_mask_img, use_container_width=True)
251
-
252
- # Additional results
253
- st.markdown("---")
254
- st.subheader("๐Ÿ“Š Segmentation Results")
255
-
256
- result_cols = st.columns(3)
257
-
258
- with result_cols[0]:
259
- if show_overlay:
260
- st.markdown("**Overlay View**")
261
- overlay = overlay_mask_on_image(img_array, binary_mask)
262
- st.image(overlay, use_container_width=True)
263
-
264
- with result_cols[1]:
265
- if show_probabilistic:
266
- st.markdown("**Probabilistic Mask**")
267
- prob_mask_img = (prob_mask * 255).astype(np.uint8)
268
- st.image(prob_mask_img, use_container_width=True)
269
-
270
- with result_cols[2]:
271
- if show_iris_strip and iris_strip is not None:
272
- st.markdown("**Extracted Iris Strip**")
273
- st.image(iris_strip, use_container_width=True)
274
- elif show_iris_strip:
275
- st.warning("No iris region detected")
276
-
277
- # Download options
278
- st.markdown("---")
279
- st.subheader("๐Ÿ’พ Download Results")
280
-
281
- download_cols = st.columns(3)
282
-
283
- with download_cols[0]:
284
- # Binary mask download
285
- binary_pil = Image.fromarray(binary_mask_img)
286
- buf = io.BytesIO()
287
- binary_pil.save(buf, format="PNG")
288
- st.download_button(
289
- label="Download Binary Mask",
290
- data=buf.getvalue(),
291
- file_name="binary_mask.png",
292
- mime="image/png"
293
- )
294
-
295
- with download_cols[1]:
296
- if show_overlay:
297
- # Overlay download
298
- overlay_pil = Image.fromarray(cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB))
299
  buf = io.BytesIO()
300
- overlay_pil.save(buf, format="PNG")
301
  st.download_button(
302
- label="Download Overlay",
303
  data=buf.getvalue(),
304
- file_name="overlay.png",
305
  mime="image/png"
306
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
307
 
308
- with download_cols[2]:
309
- if iris_strip is not None:
310
- # Iris strip download
311
- strip_pil = Image.fromarray(cv2.cvtColor(iris_strip, cv2.COLOR_BGR2RGB))
312
- buf = io.BytesIO()
313
- strip_pil.save(buf, format="PNG")
314
- st.download_button(
315
- label="Download Iris Strip",
316
- data=buf.getvalue(),
317
- file_name="iris_strip.png",
318
- mime="image/png"
319
- )
320
-
321
- # Statistics
322
- st.markdown("---")
323
- st.subheader("๐Ÿ“ˆ Segmentation Statistics")
324
- stats_cols = st.columns(4)
325
-
326
- mask_area = np.sum(binary_mask > 0)
327
- total_area = binary_mask.shape[0] * binary_mask.shape[1]
328
- coverage = (mask_area / total_area) * 100
329
-
330
- with stats_cols[0]:
331
- st.metric("Mask Coverage", f"{coverage:.2f}%")
332
- with stats_cols[1]:
333
- st.metric("Image Size", f"{img_array.shape[1]}x{img_array.shape[0]}")
334
- with stats_cols[2]:
335
- st.metric("Mask Area (pixels)", f"{mask_area:,}")
336
- with stats_cols[3]:
337
- if iris_strip is not None:
338
- st.metric("Strip Size", f"{iris_strip.shape[1]}x{iris_strip.shape[0]}")
339
-
340
- except Exception as e:
341
- st.error(f"โŒ Error during segmentation: {str(e)}")
342
- st.error("Full traceback:")
343
- st.code(traceback.format_exc())
344
 
345
  # Footer
346
  st.markdown("---")
347
  st.markdown("""
348
  <div style='text-align: center'>
349
  <p><strong>VREyeSAM</strong> - Virtual Reality Non-Frontal Iris Segmentation</p>
350
- <p>Sharma et al., IJCB 2025</p>
351
  <p>๐Ÿ”— <a href='https://github.com/GeetanjaliGTZ/VREyeSAM'>GitHub</a> |
352
  📧 <a href='mailto:geetanjalisharma546@gmail.com'>Contact</a></p>
353
  </div>
 
6
  import io
7
  import sys
8
  import os
 
9
 
10
  # Add segment-anything-2 to path
11
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), "segment-anything-2"))
 
17
  st.set_page_config(
18
  page_title="VREyeSAM - Non-frontal Iris Segmentation",
19
  page_icon="👁️",
20
+ layout="wide",
21
+ initial_sidebar_state="expanded"
22
  )
23
 
24
  # Custom CSS
 
47
  def load_model():
48
  """Load the VREyeSAM model"""
49
  try:
50
+ # Correct paths as specified
51
  model_cfg = "configs/sam2/sam2_hiera_s.yaml"
52
  sam2_checkpoint = "segment-anything-2/checkpoints/sam2_hiera_small.pt"
53
  fine_tuned_weights = "segment-anything-2/checkpoints/VREyeSAM_uncertainity_best.torch"
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  # Load model
56
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
57
 
58
  sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device)
59
  predictor = SAM2ImagePredictor(sam2_model)
 
61
 
62
  return predictor
63
  except Exception as e:
64
+ st.error(f"Error loading model: {str(e)}")
 
 
65
  return None
66
 
67
  def read_and_resize_image(image):
 
173
  - Inconsistent lighting conditions
174
 
175
  **Model Performance:**
 
176
  - Recall: 0.870
177
  - F1-Score: 0.806
 
178
  """)
179
 
180
  st.header("Settings")
 
192
 
193
  st.success("✅ Model loaded successfully!")
194
 
195
+ # File uploader with increased size limit
196
  uploaded_file = st.file_uploader(
197
  "Upload an iris image (JPG, PNG, JPEG)",
198
  type=["jpg", "png", "jpeg"],
 
200
  )
201
 
202
  if uploaded_file is not None:
203
+ try:
204
+ # Display original image
205
+ image = Image.open(uploaded_file)
206
+
207
+ col1, col2 = st.columns(2)
208
+
209
+ with col1:
210
+ st.subheader("📷 Original Image")
211
+ st.image(image, use_container_width=True)
212
+
213
+ # Process button
214
+ if st.button("🔍 Segment Iris", type="primary"):
215
+ with st.spinner("Segmenting iris..."):
216
+ try:
217
+ # Prepare image
218
+ img_array = read_and_resize_image(image)
219
+
220
+ # Perform segmentation
221
+ binary_mask, prob_mask = segment_iris(predictor, img_array)
222
+
223
+ # Extract iris strip
224
+ iris_strip = extract_iris_strip(img_array, binary_mask) if show_iris_strip else None
225
+
226
+ with col2:
227
+ st.subheader("🎯 Binary Mask")
228
+ binary_mask_img = (binary_mask * 255).astype(np.uint8)
229
+ st.image(binary_mask_img, use_container_width=True)
230
+
231
+ # Additional results
232
+ st.markdown("---")
233
+ st.subheader("๐Ÿ“Š Segmentation Results")
234
+
235
+ result_cols = st.columns(3)
236
+
237
+ with result_cols[0]:
238
+ if show_overlay:
239
+ st.markdown("**Overlay View**")
240
+ overlay = overlay_mask_on_image(img_array, binary_mask)
241
+ st.image(overlay, use_container_width=True)
242
+
243
+ with result_cols[1]:
244
+ if show_probabilistic:
245
+ st.markdown("**Probabilistic Mask**")
246
+ prob_mask_img = (prob_mask * 255).astype(np.uint8)
247
+ st.image(prob_mask_img, use_container_width=True)
248
+
249
+ with result_cols[2]:
250
+ if show_iris_strip and iris_strip is not None:
251
+ st.markdown("**Extracted Iris Strip**")
252
+ st.image(iris_strip, use_container_width=True)
253
+ elif show_iris_strip:
254
+ st.warning("No iris region detected")
255
+
256
+ # Download options
257
+ st.markdown("---")
258
+ st.subheader("๐Ÿ’พ Download Results")
259
+
260
+ download_cols = st.columns(3)
261
+
262
+ with download_cols[0]:
263
+ # Binary mask download
264
+ binary_pil = Image.fromarray(binary_mask_img)
 
 
 
 
 
 
 
 
 
 
 
 
265
  buf = io.BytesIO()
266
+ binary_pil.save(buf, format="PNG")
267
  st.download_button(
268
+ label="Download Binary Mask",
269
  data=buf.getvalue(),
270
+ file_name="binary_mask.png",
271
  mime="image/png"
272
  )
273
+
274
+ with download_cols[1]:
275
+ if show_overlay:
276
+ # Overlay download
277
+ overlay_pil = Image.fromarray(cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB))
278
+ buf = io.BytesIO()
279
+ overlay_pil.save(buf, format="PNG")
280
+ st.download_button(
281
+ label="Download Overlay",
282
+ data=buf.getvalue(),
283
+ file_name="overlay.png",
284
+ mime="image/png"
285
+ )
286
+
287
+ with download_cols[2]:
288
+ if iris_strip is not None:
289
+ # Iris strip download
290
+ strip_pil = Image.fromarray(cv2.cvtColor(iris_strip, cv2.COLOR_BGR2RGB))
291
+ buf = io.BytesIO()
292
+ strip_pil.save(buf, format="PNG")
293
+ st.download_button(
294
+ label="Download Iris Strip",
295
+ data=buf.getvalue(),
296
+ file_name="iris_strip.png",
297
+ mime="image/png"
298
+ )
299
+
300
+ # Statistics
301
+ st.markdown("---")
302
+ st.subheader("๐Ÿ“ˆ Segmentation Statistics")
303
+ stats_cols = st.columns(4)
304
+
305
+ mask_area = np.sum(binary_mask > 0)
306
+ total_area = binary_mask.shape[0] * binary_mask.shape[1]
307
+ coverage = (mask_area / total_area) * 100
308
+
309
+ with stats_cols[0]:
310
+ st.metric("Mask Coverage", f"{coverage:.2f}%")
311
+ with stats_cols[1]:
312
+ st.metric("Image Size", f"{img_array.shape[1]}x{img_array.shape[0]}")
313
+ with stats_cols[2]:
314
+ st.metric("Mask Area (pixels)", f"{mask_area:,}")
315
+ with stats_cols[3]:
316
+ if iris_strip is not None:
317
+ st.metric("Strip Size", f"{iris_strip.shape[1]}x{iris_strip.shape[0]}")
318
 
319
+ except Exception as e:
320
+ st.error(f"โŒ Error during segmentation: {str(e)}")
321
+
322
+ except Exception as e:
323
+ st.error(f"โŒ Error loading image: {str(e)}")
324
+ st.info("Please try uploading a different image or reducing the file size.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325
 
326
  # Footer
327
  st.markdown("---")
328
  st.markdown("""
329
  <div style='text-align: center'>
330
  <p><strong>VREyeSAM</strong> - Virtual Reality Non-Frontal Iris Segmentation</p>
 
331
  <p>๐Ÿ”— <a href='https://github.com/GeetanjaliGTZ/VREyeSAM'>GitHub</a> |
332
  📧 <a href='mailto:geetanjalisharma546@gmail.com'>Contact</a></p>
333
  </div>
requirements_deploy.txt CHANGED
@@ -1,39 +1,27 @@
1
- # VREyeSAM Requirements - Fixed Version Constraints
2
- # Compatible with Python 3.11+
3
- # This version resolves NumPy conflicts with gensim and numba
4
 
5
- # Web Interface
6
- streamlit>=1.28.0,<2.0.0
7
-
8
- # Core ML and Deep Learning - COMPATIBLE VERSIONS
9
- torch==2.3.0
10
- torchvision==0.18.0
11
  numpy>=1.22.0,<2.0.0
12
 
13
  # Computer Vision
14
- opencv-python-headless>=4.5.0,<5.0.0
15
- Pillow>=8.0.0,<11.0.0
16
 
17
- # Data Processing and ML
18
- pandas>=1.3.0,<3.0.0
19
- scikit-learn>=1.0.0,<2.0.0
20
 
21
  # Visualization
22
- matplotlib>=3.5.0,<4.0.0
23
 
24
  # Utility
25
- tqdm>=4.62.0,<5.0.0
26
- hydra-core>=1.1.0,<2.0.0
27
- omegaconf>=2.1.0,<3.0.0
28
 
29
  # For downloading model weights
30
- huggingface-hub>=0.19.0,<1.0.0
31
-
32
- # Note: Install PyTorch with CUDA support separately if needed:
33
- # For CUDA 11.8: pip install torch==2.3.0 torchvision==0.18.0 --index-url https://download.pytorch.org/whl/cu118
34
- # For CUDA 12.1: pip install torch==2.3.0 torchvision==0.18.0 --index-url https://download.pytorch.org/whl/cu121
35
- # For CPU only: pip install torch==2.3.0 torchvision==0.18.0 --index-url https://download.pytorch.org/whl/cpu
36
 
37
- # SAM2 will be installed separately from git:
38
- # git clone https://github.com/facebookresearch/segment-anything-2
39
- # cd segment-anything-2 && pip install -e . && cd ..
 
1
+ # Streamlit for web interface
2
+ streamlit>=1.28.0
 
3
 
4
+ # Core ML and Deep Learning
5
+ torch>=2.0.0,<2.5.0
6
+ torchvision>=0.15.0,<0.20.0
 
 
 
7
  numpy>=1.22.0,<2.0.0
8
 
9
  # Computer Vision
10
+ opencv-python-headless>=4.5.0
11
+ Pillow>=8.0.0
12
 
13
+ # Data Processing
14
+ pandas>=1.3.0
15
+ scikit-learn>=1.0.0
16
 
17
  # Visualization
18
+ matplotlib>=3.5.0
19
 
20
  # Utility
21
+ tqdm>=4.62.0
22
+ hydra-core>=1.1.0
 
23
 
24
  # For downloading model weights
25
+ huggingface-hub>=0.19.0
 
 
 
 
 
26
 
27
+ # Note: SAM2 will be installed from git in Dockerfile