VishaliniS456 committed on
Commit
4cda8b7
·
verified ·
1 Parent(s): 25b1bc0

Upload 5 files

Browse files
Files changed (5) hide show
  1. .gitignore +34 -0
  2. README.md +104 -12
  3. app.py +321 -0
  4. download_oscd.py +75 -0
  5. requirements.txt +15 -0
.gitignore ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Virtual environments
2
+ venv/
3
+ env/
4
+ ENV/
5
+ .venv
6
+
7
+ # Python cache
8
+ __pycache__/
9
+ *.py[cod]
10
+ *$py.class
11
+ *.so
12
+
13
+ # Gradio cache
14
+ .gradio/
15
+
16
+ # IDE
17
+ .vscode/
18
+ .idea/
19
+ *.swp
20
+ *.swo
21
+
22
+ # OS
23
+ .DS_Store
24
+ Thumbs.db
25
+
26
+ # Python
27
+ *.egg-info/
28
+ dist/
29
+ build/
30
+
31
+ # Jupyter
32
+ .ipynb_checkpoints/
33
+ *.ipynb_checkpoints
34
+
README.md CHANGED
@@ -1,12 +1,104 @@
1
- ---
2
- title: SentinelWatch
3
- emoji: 🚀
4
- colorFrom: red
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 6.6.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SentinelWatch
2
+
3
+ Detect changes in Sentinel-2 satellite imagery using Vision Transformers. Upload before/after images and get instant change detection with automatic cloud masking.
4
+
5
+ **Features:**
6
+ - Cloud detection with confidence scoring
7
+ - Change detection using Siamese ViT architecture
8
+ - Interactive web interface (Gradio)
9
+ - Evaluation metrics (IoU, F1, Accuracy)
10
+
11
+ ## Project Structure
12
+
13
+ ```
14
+ ├── app.py # Gradio web interface
15
+ ├── requirements.txt # Dependencies
16
+ ├── models/
17
+ │ ├── cloud_detector.py # Cloud detection model
18
+ │ └── change_detector.py # Change detection model
19
+ ├── utils/
20
+ │ ├── preprocessing.py # Image preprocessing
21
+ │ ├── visualization.py # Visualization utilities
22
+ │ ├── evaluation.py # Metrics
23
+ │ └── metrics.py # Advanced metrics
24
+ ├── examples/ # Sample images
25
+ │ ├── before/
26
+ │ ├── after/
27
+ │ └── ground_truth/
28
+ └── notebooks/
29
+ └── fine_tune_vit.ipynb # Fine-tuning tutorial
30
+ ```
31
+
32
+ ## Quick Start
33
+
34
+ **Requirements:** Python 3.8+, CUDA 11.0+ (optional)
35
+
36
+ 1. **Clone and setup:**
37
+ ```bash
38
+ cd Sentinel-Watch
39
+ python -m venv venv
40
+ source venv/bin/activate
41
+ pip install -r requirements.txt
42
+ ```
43
+
44
+ 2. **Run the app:**
45
+ ```bash
46
+ python app.py
47
+ ```
48
+ Opens at `http://localhost:7860`
49
+
50
+ 3. **(Optional) Download example data:**
51
+ ```bash
52
+ python download_oscd.py
53
+ ```
54
+
55
+ ## Usage
56
+
57
+ ### Web Interface
58
+ - **Cloud Detection Tab**: Upload image → detect clouds
59
+ - **Change Detection Tab**: Upload before/after → detect changes
60
+ - **Examples Tab**: View pre-loaded results
61
+
62
+ ### Python API
63
+
64
+ ```python
65
+ from models.cloud_detector import CloudDetector
66
+ from models.change_detector import ChangeDetector
67
+ import cv2
68
+
69
+ before = cv2.imread("before.jpg")
70
+ after = cv2.imread("after.jpg")
71
+ before = cv2.cvtColor(before, cv2.COLOR_BGR2RGB)
72
+ after = cv2.cvtColor(after, cv2.COLOR_BGR2RGB)
73
+
74
+ cloud_detector = CloudDetector()
75
+ change_detector = ChangeDetector()
76
+
77
+ # Cloud detection
78
+ cloud_mask, confidence = cloud_detector.detect_clouds(before)
79
+
80
+ # Change detection
81
+ change_mask, confidence = change_detector.detect_changes(before, after)
82
+ ```
83
+
84
+ ## Model Architecture
85
+
86
+ **Cloud Detector:**
87
+ - Vision Transformer (ViT-Base)
88
+ - Input: 224×224 RGB images
89
+ - Output: Binary cloud mask + confidence scores
90
+
91
+ **Change Detector:**
92
+ - Siamese ViT network
93
+ - Compares before/after image patches
94
+ - Output: Change mask + confidence map
95
+
96
+ ## Metrics
97
+
98
+ - **IoU** (Intersection over Union)
99
+ - **F1 Score**
100
+ - **Accuracy, Precision, Recall**
101
+
102
+ ## License
103
+
104
+ MIT License
app.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+ from pathlib import Path
5
+ from typing import Tuple, Optional
6
+ import os
7
+
8
+ from models.cloud_detector import CloudDetector
9
+ from models.change_detector import ChangeDetector
10
+ from utils.preprocessing import preprocess_image, mask_clouds
11
+ from utils.visualization import create_overlay, visualize_predictions
12
+ from utils.evaluation import calculate_metrics
13
+ from utils.metrics import compare_with_without_masking, calculate_change_statistics
14
+
15
+
16
+ # Initialize models
17
+ device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"
18
+ cloud_detector = CloudDetector(device=device)
19
+ change_detector = ChangeDetector(device=device)
20
+
21
+
22
def load_example_images():
    """Load paired before/after example images from the ``examples`` directory.

    Scans ``examples/before`` and ``examples/after`` for PNG/JPG/JPEG files,
    pairs them positionally after sorting by filename, and returns a list of
    ``[before, after]`` RGB image pairs.

    Returns:
        list: ``[before, after]`` pairs of ``np.ndarray`` RGB images; empty
        when the directory is missing or no readable pairs exist.
    """
    examples_dir = Path("examples")
    if not examples_dir.is_dir():
        # Nothing downloaded yet (see download_oscd.py) — fail soft with
        # an empty list so the Examples tab can show its fallback message.
        return []

    def _sorted_images(subdir):
        # Collect the common raster extensions; previously ``.jpeg`` files
        # were silently skipped.
        folder = examples_dir / subdir
        files = []
        for pattern in ("*.png", "*.jpg", "*.jpeg"):
            files.extend(folder.glob(pattern))
        return sorted(files)

    examples = []
    # zip() pairs positionally and truncates to the shorter list, so an
    # unmatched trailing file is ignored rather than raising.
    for before_file, after_file in zip(_sorted_images("before"), _sorted_images("after")):
        before = cv2.imread(str(before_file))
        after = cv2.imread(str(after_file))
        if before is None or after is None:
            continue  # unreadable/corrupt file — skip this pair

        # cv2 loads BGR; the rest of the app works in RGB.
        examples.append([
            cv2.cvtColor(before, cv2.COLOR_BGR2RGB),
            cv2.cvtColor(after, cv2.COLOR_BGR2RGB),
        ])

    return examples
46
+
47
+
48
def detect_clouds_in_image(
    image: np.ndarray,
    cloud_threshold: float = 0.5
) -> Tuple[np.ndarray, str]:
    """
    Run cloud detection on one image and build a human-readable report.

    Args:
        image: Input image (H, W, 3), or None when nothing was uploaded.
        cloud_threshold: Confidence threshold for the cloud mask.

    Returns:
        Tuple of (overlay_image, stats_text). When no image is provided,
        the overlay is None and the text asks the user to upload one.
    """
    if image is None:
        return None, "Please upload an image."

    # Normalise to float [0, 1] before handing off to the detector.
    prepared = preprocess_image(image, normalize=True)

    # Detector yields a 2D binary mask and a 2D per-pixel confidence map.
    mask, confidence = cloud_detector.detect_clouds(
        prepared, threshold=cloud_threshold
    )

    # Tinted overlay of the mask drawn on the untouched input image.
    overlay = create_overlay(image, mask, alpha=0.5, color=(0, 0, 255))

    # Summary statistics derived from the mask and confidence map.
    n_total = int(mask.size)
    n_cloud = int(np.sum(mask))
    pct = 100.0 * n_cloud / n_total if n_total > 0 else 0.0

    report_lines = [
        "Cloud Detection Results:",
        "─────────────────────",
        f"Cloud Pixels: {n_cloud}",
        f"Total Pixels: {n_total}",
        f"Cloud Percentage: {pct:.2f}%",
        f"Mean Confidence: {float(confidence.mean()):.4f}",
        f"Max Confidence: {float(confidence.max()):.4f}",
        f"Min Confidence: {float(confidence.min()):.4f}",
    ]

    return overlay, "\n".join(report_lines)
97
+
98
+
99
def detect_changes(
    before_image: np.ndarray,
    after_image: np.ndarray,
    apply_cloud_masking: bool = True,
    cloud_threshold: float = 0.5,
    change_threshold: float = 0.5
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, str, str]:
    """
    Detect changes between two temporal images.

    Pipeline: resize both inputs to 512×512 → normalise to [0,1] →
    (optionally) mask cloud pixels in both frames → run the Siamese
    change detector → build overlays and text reports.

    Args:
        before_image: Earlier image (H, W, 3), or None if not uploaded.
        after_image: Later image (H, W, 3), or None if not uploaded.
        apply_cloud_masking: When True, cloud pixels (union of both
            frames' cloud masks) are zeroed out before change detection.
        cloud_threshold: Confidence threshold for the cloud detector.
        change_threshold: Confidence threshold for the change detector.

    Returns:
        Tuple of (before_overlay, after_overlay, change_mask_vis,
        metrics_text, stats_text)
    """
    if before_image is None or after_image is None:
        # Return black placeholder images so Gradio outputs stay valid.
        empty = np.zeros((224, 224, 3), dtype=np.uint8)
        return empty, empty, empty, "Please upload both images.", ""

    # Resize both to the same size before processing
    TARGET = (512, 512)
    before_image = cv2.resize(before_image, TARGET, interpolation=cv2.INTER_LINEAR)
    after_image = cv2.resize(after_image, TARGET, interpolation=cv2.INTER_LINEAR)

    # Preprocess to float [0,1]
    before_preprocessed = preprocess_image(before_image, normalize=True)
    after_preprocessed = preprocess_image(after_image, normalize=True)

    # Stays None when masking is disabled — tested again below before
    # drawing the cloud overlays.
    cloud_mask = None

    if apply_cloud_masking:
        # Detect clouds independently in each frame.
        cloud_mask_before, _ = cloud_detector.detect_clouds(
            before_preprocessed, threshold=cloud_threshold
        )
        cloud_mask_after, _ = cloud_detector.detect_clouds(
            after_preprocessed, threshold=cloud_threshold
        )

        # Combined cloud mask (union of both) — a pixel cloudy in either
        # frame is excluded from change detection in both.
        cloud_mask = np.logical_or(cloud_mask_before, cloud_mask_after).astype(np.uint8)

        # Zero out cloudy pixels so they cannot register as changes.
        before_masked = mask_clouds(before_preprocessed, cloud_mask, fill_value=0.0)
        after_masked = mask_clouds(after_preprocessed, cloud_mask, fill_value=0.0)
    else:
        before_masked = before_preprocessed
        after_masked = after_preprocessed

    # Run the Siamese change detector — returns a 2D binary mask and a
    # 2D per-pixel confidence map.
    change_mask, change_confidence = change_detector.detect_changes(
        before_masked,
        after_masked,
        threshold=change_threshold
    )

    # Change overlays drawn on the (resized) original images.
    before_overlay = create_overlay(before_image, change_mask, alpha=0.5, color=(255, 0, 0))
    after_overlay = create_overlay(after_image, change_mask, alpha=0.5, color=(255, 0, 0))

    if cloud_mask is not None:
        # Blend a second, cloud-mask overlay 50/50 on top of the change
        # overlay so both masks are visible in one image.
        cloud_overlay_before = create_overlay(before_image, cloud_mask, alpha=0.4, color=(0, 0, 255))
        cloud_overlay_after = create_overlay(after_image, cloud_mask, alpha=0.4, color=(0, 0, 255))
        before_overlay = (before_overlay * 0.5 + cloud_overlay_before * 0.5).astype(np.uint8)
        after_overlay = (after_overlay * 0.5 + cloud_overlay_after * 0.5).astype(np.uint8)

    # Change mask visualisation (white = changed), expanded to 3 channels
    # so it displays as an image.
    change_mask_vis = (change_mask * 255).astype(np.uint8)
    change_mask_vis = cv2.cvtColor(change_mask_vis, cv2.COLOR_GRAY2RGB)

    # Aggregate statistics computed from the 2D mask/confidence arrays.
    stats = calculate_change_statistics(change_mask, change_confidence)

    metrics_text = (
        f"Change Detection Metrics:\n"
        f"─────────────────────────\n"
        f"Mean Confidence: {float(change_confidence.mean()):.4f}\n"
        f"Max Confidence: {float(change_confidence.max()):.4f}\n"
        f"Min Confidence: {float(change_confidence.min()):.4f}\n"
        f"Algorithm: Siamese ViT\n"
        f"Cloud Masking: {'Yes' if apply_cloud_masking else 'No'}"
    )

    # 'change_confidence_mean' is only meaningful (and presumably only
    # present/valid) when at least one pixel changed — guard the lookup.
    if stats["changed_pixels"] > 0:
        change_conf_line = (
            f"Change Region Confidence: {stats['change_confidence_mean']:.4f}"
        )
    else:
        change_conf_line = "No changes detected above threshold"

    stats_text = (
        f"Change Statistics:\n"
        f"──────────────────\n"
        f"Total Pixels: {stats['total_pixels']}\n"
        f"Changed Pixels: {stats['changed_pixels']}\n"
        f"Unchanged Pixels: {stats['unchanged_pixels']}\n"
        f"Change Percentage: {stats['change_percentage']:.2f}%\n"
        f"Mean Confidence: {stats['mean_confidence']:.4f}\n"
        f"Min Confidence: {stats['min_confidence']:.4f}\n"
        f"Max Confidence: {stats['max_confidence']:.4f}\n"
        f"{change_conf_line}"
    )

    return before_overlay, after_overlay, change_mask_vis, metrics_text, stats_text
201
+
202
+
203
def create_comparison_interface():
    """Build the Gradio Blocks UI for change detection.

    Three tabs: single-image cloud detection, before/after change
    detection, and a gallery of up to three pre-loaded example pairs.

    Returns:
        gr.Blocks: the assembled demo (not yet launched).
    """
    with gr.Blocks(title="Satellite Change Detector") as demo:
        gr.Markdown(
            """
            # Satellite Change Detection System

            Detect changes in Sentinel-2 satellite imagery using Vision Transformer models.
            Compare results with and without cloud masking.
            """
        )

        with gr.Tabs():
            # ── Cloud Detection Tab ──────────────────────────────────────────
            with gr.Tab("Cloud Detection"):
                gr.Markdown("### Detect and visualize clouds in satellite imagery")

                with gr.Row():
                    with gr.Column():
                        cloud_input = gr.Image(label="Input Image", type="numpy")
                        cloud_threshold = gr.Slider(
                            0, 1, value=0.5, step=0.01,
                            label="Cloud Detection Threshold"
                        )
                        cloud_detect_btn = gr.Button("Detect Clouds")

                    with gr.Column():
                        cloud_overlay_output = gr.Image(label="Cloud Detection Result")
                        cloud_stats_output = gr.Textbox(label="Statistics", lines=8)

                cloud_detect_btn.click(
                    detect_clouds_in_image,
                    inputs=[cloud_input, cloud_threshold],
                    outputs=[cloud_overlay_output, cloud_stats_output]
                )

            # ── Change Detection Tab ─────────────────────────────────────────
            with gr.Tab("Change Detection"):
                gr.Markdown("### Detect changes between two temporal satellite images")

                with gr.Row():
                    with gr.Column():
                        before_img = gr.Image(label="Before Image", type="numpy")
                        after_img = gr.Image(label="After Image", type="numpy")

                    with gr.Column():
                        gr.Markdown("### Settings")
                        apply_masking = gr.Checkbox(
                            value=True,
                            label="Apply Cloud Masking"
                        )
                        cloud_thresh = gr.Slider(
                            0, 1, value=0.5, step=0.01,
                            label="Cloud Threshold"
                        )
                        change_thresh = gr.Slider(
                            0, 1, value=0.5, step=0.01,
                            label="Change Threshold"
                        )
                        detect_btn = gr.Button("Detect Changes", size="lg")

                with gr.Row():
                    before_overlay_output = gr.Image(label="Before with Changes")
                    after_overlay_output = gr.Image(label="After with Changes")

                with gr.Row():
                    change_mask_output = gr.Image(label="Change Mask")
                    metrics_output = gr.Textbox(label="Metrics", lines=8)

                stats_output = gr.Textbox(label="Change Statistics", lines=10)

                detect_btn.click(
                    detect_changes,
                    inputs=[before_img, after_img, apply_masking, cloud_thresh, change_thresh],
                    outputs=[
                        before_overlay_output,
                        after_overlay_output,
                        change_mask_output,
                        metrics_output,
                        stats_output
                    ]
                )

            # ── Examples Tab ─────────────────────────────────────────────────
            with gr.Tab("Examples"):
                gr.Markdown("### Pre-loaded example images")

                # Loaded once at interface-build time, not per request.
                examples = load_example_images()

                if examples:
                    for idx, (before, after) in enumerate(examples[:3]):
                        with gr.Row():
                            gr.Image(value=before, label=f"Example {idx+1}: Before")
                            gr.Image(value=after, label=f"Example {idx+1}: After")
                else:
                    # FIX: the download helper in this repository is
                    # download_oscd.py — the message previously pointed
                    # users at a nonexistent setup_oscd.py.
                    gr.Markdown(
                        "No example images found in `examples/` directory.\n"
                        "Run `python download_oscd.py` to download OSCD samples."
                    )

        gr.Markdown(
            """
            ## About

            This application uses Vision Transformer (ViT) models for:
            - **Cloud Detection**: Identifies and masks cloud cover in satellite imagery
            - **Change Detection**: Detects land cover changes between multi-temporal observations

            Models are fine-tuned on Sentinel-2 satellite data.
            """
        )

    return demo
317
+
318
+
319
# Script entry point: build the UI and serve it.
if __name__ == "__main__":
    demo = create_comparison_interface()
    # NOTE(review): share=True requests a public gradio.live tunnel; on a
    # hosted Space the app is served directly and the flag is redundant —
    # confirm it is intended only for local runs.
    demo.launch(share=True)
download_oscd.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Quick OSCD RGB download script."""
2
+
3
+ from datasets import load_dataset
4
+ import cv2
5
+ import numpy as np
6
+ from pathlib import Path
7
+ import os
8
+
9
+ # Suppress symlinks warning on Windows
10
+ os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'
11
+
12
+ print("Downloading OSCD RGB dataset...")
13
+
14
+ try:
15
+ ds = load_dataset("blanchon/OSCD_RGB", split="train")
16
+ print(f"Downloaded {len(ds)} samples (taking first 5)")
17
+ print(f"Sample keys: {ds[0].keys()}")
18
+
19
+ # Save samples
20
+ examples_dir = Path("examples")
21
+ (examples_dir / "before").mkdir(parents=True, exist_ok=True)
22
+ (examples_dir / "after").mkdir(parents=True, exist_ok=True)
23
+ (examples_dir / "ground_truth").mkdir(parents=True, exist_ok=True)
24
+
25
+ for idx in range(min(5, len(ds))):
26
+ try:
27
+ sample = ds[idx]
28
+
29
+ # OSCD_RGB dataset uses 'image1', 'image2', 'mask' keys
30
+ if 'image1' not in sample or 'image2' not in sample or 'mask' not in sample:
31
+ print(f" Expected keys not found. Available keys: {sample.keys()}")
32
+ continue
33
+
34
+ before = np.array(sample['image1'], dtype=np.uint8)
35
+ after = np.array(sample['image2'], dtype=np.uint8)
36
+ gt = np.array(sample['mask'], dtype=np.uint8)
37
+
38
+ # Ensure 3-channel RGB
39
+ if before.ndim == 3 and before.shape[2] >= 3:
40
+ before = before[:, :, :3]
41
+ if after.ndim == 3 and after.shape[2] >= 3:
42
+ after = after[:, :, :3]
43
+
44
+ # Save images
45
+ before_path = examples_dir / "before" / f"oscd_{idx:02d}.png"
46
+ after_path = examples_dir / "after" / f"oscd_{idx:02d}.png"
47
+ gt_path = examples_dir / "ground_truth" / f"oscd_{idx:02d}.png"
48
+
49
+ # Convert RGB to BGR for cv2 (if not already BGR)
50
+ if before.dtype == np.uint8:
51
+ before_bgr = cv2.cvtColor(before, cv2.COLOR_RGB2BGR) if before.max() > 1 else before
52
+ after_bgr = cv2.cvtColor(after, cv2.COLOR_RGB2BGR) if after.max() > 1 else after
53
+ else:
54
+ before_bgr = before
55
+ after_bgr = after
56
+
57
+ cv2.imwrite(str(before_path), before_bgr)
58
+ cv2.imwrite(str(after_path), after_bgr)
59
+ cv2.imwrite(str(gt_path), gt * 255 if gt.max() <= 1 else gt)
60
+
61
+ print(f"✓ Saved sample {idx+1}: before={before.shape}, after={after.shape}, gt={gt.shape}")
62
+ except Exception as e:
63
+ print(f"✗ Error saving sample {idx}: {e}")
64
+ import traceback
65
+ traceback.print_exc()
66
+
67
+ print("\n OSCD RGB images downloaded successfully!")
68
+
69
+ except Exception as e:
70
+ print(f"Error downloading dataset: {e}")
71
+ import traceback
72
+ traceback.print_exc()
73
+ print("\nMake sure internet is connected and try again")
74
+
75
+
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=1.12.0
2
+ torchvision>=0.13.0
3
+ transformers>=4.25.0
4
+ opencv-python>=4.6.0
5
+ numpy>=1.21.0
6
+ scipy>=1.7.0
7
+ scikit-learn>=1.0.0
8
+ matplotlib>=3.5.0
9
+ gradio>=3.35.0
10
+ Pillow>=9.0.0
11
+ jupyter>=1.0.0
12
+ ipykernel>=6.0.0
13
+ tqdm>=4.60.0
14
+ datasets>=2.14.0
15
+ huggingface-hub>=0.17.0