Spaces:
Running
Running
Update to latest Gradio and fix deprecated APIs
- Update sdk_version from 2.8.12 to 5.29.0
- Replace deprecated gr.inputs/gr.outputs with modern Gradio components
- Replace removed huggingface_hub.keras_mixin.from_pretrained_keras with snapshot_download + tf.keras.models.load_model
- Replace tensorflow.keras.preprocessing.image with numpy operations
- Fix cv2.merge to use 2D arrays instead of 3D
- Cast slider values to int where needed
- Add huggingface_hub to requirements.txt
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
- README.md +1 -1
- app.py +27 -28
- requirements.txt +2 -1
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 👀
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 2.8.12
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 5.29.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
app.py
CHANGED
|
@@ -4,15 +4,10 @@ from sklearn import datasets
|
|
| 4 |
import numpy as np
|
| 5 |
from matplotlib import pyplot as plt
|
| 6 |
from scipy import ndimage
|
| 7 |
-
from skimage import measure, color
|
| 8 |
-
from tensorflow.keras.preprocessing import image
|
| 9 |
-
from scipy import ndimage
|
| 10 |
-
import skimage.io as io
|
| 11 |
-
import skimage.transform as trans
|
| 12 |
-
import numpy as np
|
| 13 |
import tensorflow as tf
|
| 14 |
import gradio as gr
|
| 15 |
-
from huggingface_hub
|
| 16 |
from itertools import cycle, islice
|
| 17 |
|
| 18 |
|
|
@@ -49,7 +44,8 @@ def create_input_image(data, visualize=False):
|
|
| 49 |
|
| 50 |
return input
|
| 51 |
|
| 52 |
-
|
|
|
|
| 53 |
|
| 54 |
|
| 55 |
def get_instances(prediction, data, max_filter_size=1):
|
|
@@ -60,7 +56,9 @@ def get_instances(prediction, data, max_filter_size=1):
|
|
| 60 |
prediction[prediction == 4] = 255
|
| 61 |
|
| 62 |
#Convert to 8-bit image
|
| 63 |
-
prediction =
|
|
|
|
|
|
|
| 64 |
|
| 65 |
#Get 1 color channel
|
| 66 |
cells=prediction[:,:,0]
|
|
@@ -83,7 +81,7 @@ def get_instances(prediction, data, max_filter_size=1):
|
|
| 83 |
markers[unknown==255] = 0
|
| 84 |
|
| 85 |
#Watershed
|
| 86 |
-
img = cv2.merge((
|
| 87 |
markers = cv2.watershed(img,markers)
|
| 88 |
img[markers == -1] = [0,255,255]
|
| 89 |
|
|
@@ -125,6 +123,11 @@ def get_instances(prediction, data, max_filter_size=1):
|
|
| 125 |
|
| 126 |
|
| 127 |
def visual_clustering(cluster_type, num_clusters, num_samples, noise, random_state, median_kernel_size, max_kernel_size):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
NUM_CLUSTERS = num_clusters
|
| 130 |
CLUSTER_STD = 4 * np.ones(NUM_CLUSTERS)
|
|
@@ -197,25 +200,21 @@ Gradio Demo for Visual Clustering on synthetic datasets.
|
|
| 197 |
'''
|
| 198 |
|
| 199 |
iface = gr.Interface(
|
| 200 |
-
|
| 201 |
-
fn=visual_clustering,
|
| 202 |
-
|
| 203 |
inputs=[
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
outputs=[
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
title=title,
|
| 219 |
description=description,
|
| 220 |
-
|
| 221 |
-
iface.launch(
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
from matplotlib import pyplot as plt
|
| 6 |
from scipy import ndimage
|
| 7 |
+
from skimage import measure, color
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
import tensorflow as tf
|
| 9 |
import gradio as gr
|
| 10 |
+
from huggingface_hub import snapshot_download
|
| 11 |
from itertools import cycle, islice
|
| 12 |
|
| 13 |
|
|
|
|
| 44 |
|
| 45 |
return input
|
| 46 |
|
| 47 |
+
model_path = snapshot_download("tareknaous/unet-visual-clustering")
|
| 48 |
+
model = tf.keras.models.load_model(model_path)
|
| 49 |
|
| 50 |
|
| 51 |
def get_instances(prediction, data, max_filter_size=1):
|
|
|
|
| 56 |
prediction[prediction == 4] = 255
|
| 57 |
|
| 58 |
#Convert to 8-bit image
|
| 59 |
+
prediction = np.uint8(prediction)
|
| 60 |
+
if prediction.ndim == 2:
|
| 61 |
+
prediction = prediction[..., np.newaxis]
|
| 62 |
|
| 63 |
#Get 1 color channel
|
| 64 |
cells=prediction[:,:,0]
|
|
|
|
| 81 |
markers[unknown==255] = 0
|
| 82 |
|
| 83 |
#Watershed
|
| 84 |
+
img = cv2.merge((cells, cells, cells))
|
| 85 |
markers = cv2.watershed(img,markers)
|
| 86 |
img[markers == -1] = [0,255,255]
|
| 87 |
|
|
|
|
| 123 |
|
| 124 |
|
| 125 |
def visual_clustering(cluster_type, num_clusters, num_samples, noise, random_state, median_kernel_size, max_kernel_size):
|
| 126 |
+
num_clusters = int(num_clusters)
|
| 127 |
+
num_samples = int(num_samples)
|
| 128 |
+
random_state = int(random_state)
|
| 129 |
+
median_kernel_size = int(median_kernel_size)
|
| 130 |
+
max_kernel_size = int(max_kernel_size)
|
| 131 |
|
| 132 |
NUM_CLUSTERS = num_clusters
|
| 133 |
CLUSTER_STD = 4 * np.ones(NUM_CLUSTERS)
|
|
|
|
| 200 |
'''
|
| 201 |
|
| 202 |
iface = gr.Interface(
|
| 203 |
+
fn=visual_clustering,
|
|
|
|
|
|
|
| 204 |
inputs=[
|
| 205 |
+
gr.Dropdown(choices=["blobs", "varied blobs", "aniso", "noisy moons", "noisy circles"], label="Cluster Type"),
|
| 206 |
+
gr.Slider(minimum=1, maximum=10, step=1, label="Number of Clusters"),
|
| 207 |
+
gr.Slider(minimum=10000, maximum=1000000, step=10000, label="Number of Samples"),
|
| 208 |
+
gr.Slider(minimum=0.03, maximum=0.1, step=0.01, label="Noise"),
|
| 209 |
+
gr.Slider(minimum=1, maximum=100, step=1, label="Random State"),
|
| 210 |
+
gr.Slider(minimum=1, maximum=100, step=1, label="Denoising Filter Kernel Size"),
|
| 211 |
+
gr.Slider(minimum=1, maximum=100, step=1, label="Max Filter Kernel Size"),
|
| 212 |
+
],
|
|
|
|
| 213 |
outputs=[
|
| 214 |
+
gr.Plot(label="Dataset"),
|
| 215 |
+
gr.Plot(label="Clustering Result"),
|
| 216 |
+
],
|
|
|
|
| 217 |
title=title,
|
| 218 |
description=description,
|
| 219 |
+
)
|
| 220 |
+
iface.launch()
|
requirements.txt
CHANGED
|
@@ -6,4 +6,5 @@ scipy
|
|
| 6 |
tensorflow
|
| 7 |
matplotlib
|
| 8 |
numpy
|
| 9 |
-
opencv-python
|
|
|
|
|
|
| 6 |
tensorflow
|
| 7 |
matplotlib
|
| 8 |
numpy
|
| 9 |
+
opencv-python
|
| 10 |
+
huggingface_hub
|