mgumowsk commited on
Commit
776cca3
Β·
1 Parent(s): 2ddbf77

Add Gradio app with model_api inference and LFS-tracked models

Browse files
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ venv/
2
+ .venv/
3
+ __pycache__/
4
+ *.pyc
5
+ *.pyo
6
+ *.pyd
7
+ .Python
8
+ *.so
9
+ *.egg
10
+ *.egg-info/
11
+ dist/
12
+ build/
13
+ .env
14
+ .DS_Store
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Model Api
3
  emoji: πŸ“š
4
  colorFrom: red
5
  colorTo: blue
@@ -7,7 +7,7 @@ sdk: gradio
7
  sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
10
- short_description: Model API UI demo
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Resnet with OpenVINO and model_api
3
  emoji: πŸ“š
4
  colorFrom: red
5
  colorTo: blue
 
7
  sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Detection example using model_api
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Object Detection with model_api - Gradio Application
3
+ Copyright (C) 2025
4
+ """
5
+
6
+ import gradio as gr
7
+ import numpy as np
8
+ from pathlib import Path
9
+ from PIL import Image
10
+ import time
11
+ from typing import Tuple, Optional
12
+ import glob
13
+
14
+ from model_api.models import Model
15
+ from model_api.visualizer import Visualizer
16
+
17
+
18
# Global variables for model caching
# Most-recently loaded model_api Model; None until the first load_model() call.
current_model = None
# Name of the cached model; load_model() compares against this to skip reloading.
current_model_name = None
# Shared renderer used to draw inference results onto input images.
# NOTE(review): module-level state means the app is single-model-at-a-time;
# concurrent requests for different models will thrash the cache — confirm
# this is acceptable for the Space's traffic.
visualizer = Visualizer()
22
+
23
+
24
+ def get_available_models():
25
+ """
26
+ Scan the models folder for .xml files and return list of model names.
27
+
28
+ Returns:
29
+ list: List of model names (without .xml extension)
30
+ """
31
+ models_dir = Path("models")
32
+ if not models_dir.exists():
33
+ return []
34
+
35
+ xml_files = list(models_dir.glob("*.xml"))
36
+ model_names = [f.stem for f in xml_files]
37
+ return sorted(model_names)
38
+
39
+
40
def load_model(model_name: str, device: str = "CPU"):
    """
    Load an OpenVINO model via model_api, with simple process-wide caching.

    Bug fix: the cache key now includes the device. The original compared the
    model name only, so calling ``load_model(name, "GPU")`` after a CPU load
    silently returned the CPU-compiled model instead of reloading.

    Args:
        model_name: Name of the model (without .xml extension) under ``models/``.
        device: Inference device (CPU, GPU, etc.).

    Returns:
        Model instance from model_api.

    Raises:
        FileNotFoundError: If ``models/<model_name>.xml`` does not exist.
    """
    global current_model, current_model_name

    # Cache key couples name and device so a device switch forces a reload.
    cache_key = f"{model_name}@{device}"
    if current_model is not None and current_model_name == cache_key:
        return current_model

    model_path = Path("models") / f"{model_name}.xml"

    if not model_path.exists():
        raise FileNotFoundError(f"Model not found: {model_path}")

    print(f"Loading model: {model_name}")
    model = Model.create_model(str(model_path), device=device)

    # Warm-up so the first real request is not skewed by lazy initialization.
    print("Warming up model...")
    dummy_image = np.ones((224, 224, 3), dtype=np.uint8)
    for _ in range(3):
        _ = model(dummy_image)

    # Discard warm-up timings so reported metrics reflect user requests only.
    model.get_performance_metrics().reset()

    current_model = model
    current_model_name = cache_key

    print(f"Model {model_name} loaded successfully")
    return model
79
+
80
+
81
def classify_image(
    image: np.ndarray,
    model_name: str,
    confidence_threshold: float
) -> Tuple[Image.Image, str, str]:
    """
    Perform image classification and return visualized result with metrics.

    Args:
        image: Input image as numpy array
        model_name: Name of the model to use
        confidence_threshold: Confidence threshold for filtering predictions

    Returns:
        Tuple of (visualized_image, detections_text, metrics_text)
    """
    # NOTE(review): despite the "Detected Objects" wording, this reads
    # result.top_labels, which is classification output (top-K labels), not
    # detection boxes — confirm the intended model type matches the UI copy.
    try:
        # Load model (cached process-wide by load_model; CPU device default)
        model = load_model(model_name)

        # Convert numpy array to PIL Image if needed
        if isinstance(image, np.ndarray):
            pil_image = Image.fromarray(image)
        else:
            pil_image = image

        # Convert PIL to numpy for model_api
        image_np = np.array(pil_image)

        # Run inference
        result = model(image_np)

        # Get performance metrics accumulated by model_api since the last reset
        metrics = model.get_performance_metrics()
        inference_time = metrics.get_inference_time()
        preprocess_time = metrics.get_preprocess_time()
        postprocess_time = metrics.get_postprocess_time()
        fps = metrics.get_fps()

        # Format metrics text.
        # Continuation lines of this f-string are intentionally unindented:
        # they are literal text inside a triple-quoted string.
        # .mean() values appear to be seconds (hence *1000 for ms) — TODO confirm
        # against model_api's performance-metrics documentation.
        metrics_text = f"""⚡ Performance Metrics:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🔄 Preprocessing: {preprocess_time.mean()*1000:.2f} ms
⚙️ Inference: {inference_time.mean()*1000:.2f} ms
📊 Postprocessing: {postprocess_time.mean()*1000:.2f} ms
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
⏱️ Total Time: {(preprocess_time.mean() + inference_time.mean() + postprocess_time.mean())*1000:.2f} ms
🎯 FPS: {fps:.2f}
📈 Total Frames: {inference_time.count}
"""

        # Filter predictions by confidence threshold
        detections_text = "🔍 Detected Objects:\n"
        detections_text += "━" * 50 + "\n"

        if result.top_labels and len(result.top_labels) > 0:
            filtered_labels = [
                label for label in result.top_labels
                if label.confidence >= confidence_threshold
            ]

            if filtered_labels:
                # enumerate from 1 for human-friendly numbering in the UI
                for i, label in enumerate(filtered_labels, 1):
                    detections_text += f"{i}. {label.name}: {label.confidence:.3f}\n"
            else:
                detections_text += f"No detections above confidence threshold {confidence_threshold:.2f}\n"
        else:
            detections_text += "No detections found\n"

        # Visualize results using model_api's visualizer (module-level instance)
        visualized_image = visualizer.render(pil_image, result)

        return visualized_image, detections_text, metrics_text

    except Exception as e:
        # Broad catch is deliberate: any failure is surfaced in the UI text
        # boxes rather than crashing the Gradio worker. The raw input image is
        # echoed back so the output pane is never empty.
        error_msg = f"Error during inference: {str(e)}"
        print(error_msg)
        return image, error_msg, "Error: Could not compute metrics"
159
+
160
+
161
def create_gradio_interface():
    """
    Create and configure the Gradio interface.

    Returns:
        gr.Blocks: Configured Gradio interface
    """
    available_models = get_available_models()

    if not available_models:
        print("Warning: No models found in models/ folder")
        # NOTE(review): this placeholder string is later passed to load_model
        # as a model name and will surface as a FileNotFoundError in the UI
        # error box — confirm that is the desired empty-folder behavior.
        available_models = ["No models available"]

    # Layout: two equal columns — inputs/controls on the left, results on the
    # right — followed by an examples gallery below.
    with gr.Blocks(title="Object Detection with model_api") as demo:
        gr.Markdown("# 🎯 Object Detection with model_api")
        gr.Markdown("Upload an image and select a model to perform object detection using OpenVINO and model_api")

        with gr.Row():
            with gr.Column(scale=1):
                # type="numpy" so classify_image receives an ndarray directly
                input_image = gr.Image(
                    label="Input Image",
                    type="numpy",
                    height=400
                )

                model_dropdown = gr.Dropdown(
                    choices=available_models,
                    value=available_models[0] if available_models else None,
                    label="Select Model",
                    info="Choose a model from the models/ folder"
                )

                confidence_slider = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.3,
                    step=0.05,
                    label="Confidence Threshold",
                    info="Minimum confidence for displaying predictions"
                )

                classify_btn = gr.Button("🚀 Run Inference", variant="primary")

            with gr.Column(scale=1):
                # type="pil" matches the PIL image returned by classify_image
                output_image = gr.Image(
                    label="Detection Result",
                    type="pil",
                    height=400
                )

                detections_output = gr.Textbox(
                    label="Detected Objects",
                    lines=8,
                    max_lines=15
                )

                metrics_output = gr.Textbox(
                    label="Performance Metrics",
                    lines=8,
                    max_lines=15
                )

        # Examples section
        # cache_examples=False: run inference on demand rather than at startup
        gr.Markdown("## 📸 Examples")
        gr.Examples(
            examples=[
                ["examples/image1.jpg", available_models[0] if available_models else "resnet18", 0.3],
            ],
            inputs=[input_image, model_dropdown, confidence_slider],
            outputs=[output_image, detections_output, metrics_output],
            fn=classify_image,
            cache_examples=False
        )

        # Connect the button to the inference function
        classify_btn.click(
            fn=classify_image,
            inputs=[input_image, model_dropdown, confidence_slider],
            outputs=[output_image, detections_output, metrics_output]
        )

    return demo
243
+
244
+
245
+ if __name__ == "__main__":
246
+ demo = create_gradio_interface()
247
+ demo.launch(share=False)
examples/image1.jpg ADDED
models/resnet18.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17fa954b5cc16f10937920e13aa386ec681d76429df5d0d3069d2187fdf06cb4
3
+ size 23369472
models/resnet18.xml ADDED
The diff for this file is too large to render. See raw diff
 
models/resnet50.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:079dcebdec571c1b6beedc9399a67a30b4e8e2a7c248df13f11984cf56c35ec9
3
+ size 51060992
models/resnet50.xml ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ numpy>=1.21.0
3
+ pillow>=9.0.0
4
+ openvino>=2024.0.0
5
+ git+https://github.com/open-edge-platform/model_api.git