Spaces:
Sleeping
Sleeping
Commit ·
8e9727a
1
Parent(s): a24d404
feat: initialize repository and add project-level gitignore
Browse files
app.py
CHANGED
|
@@ -41,7 +41,6 @@ from mambaeye.scan import generate_scan_positions
|
|
| 41 |
from mambaeye.positional_encoding import sinusoidal_position_encoding_2d
|
| 42 |
from mamba_ssm.utils.generation import InferenceParams
|
| 43 |
|
| 44 |
-
TARGET_CANVAS_SIZE = 512
|
| 45 |
PATCH_SIZE = 16
|
| 46 |
CATEGORIES = ResNet50_Weights.IMAGENET1K_V1.meta["categories"]
|
| 47 |
|
|
@@ -149,26 +148,21 @@ def format_predictions(probs_np):
|
|
| 149 |
def preprocess_image(image_arr):
|
| 150 |
img = Image.fromarray(image_arr).convert("RGB")
|
| 151 |
width, height = img.size
|
| 152 |
-
|
| 153 |
-
ratio = min(TARGET_CANVAS_SIZE / width, TARGET_CANVAS_SIZE / height)
|
| 154 |
-
new_w = int(width * ratio)
|
| 155 |
-
new_h = int(height * ratio)
|
| 156 |
-
|
| 157 |
-
img_resized = img.resize((new_w, new_h), Image.Resampling.LANCZOS)
|
| 158 |
totensor = T.ToTensor()
|
| 159 |
-
img_tensor = totensor(
|
| 160 |
-
|
| 161 |
-
canvas = torch.zeros(3,
|
| 162 |
-
x_offset = (
|
| 163 |
-
y_offset = (
|
| 164 |
|
| 165 |
canvas[:, x_offset : x_offset + img_tensor.shape[1], y_offset : y_offset + img_tensor.shape[2]] = img_tensor
|
| 166 |
|
| 167 |
-
return canvas, x_offset, y_offset,
|
| 168 |
|
| 169 |
def extract_patch(canvas_tensor, px, py):
|
| 170 |
-
|
| 171 |
-
|
|
|
|
| 172 |
patch = canvas_tensor[:, px : px + PATCH_SIZE, py : py + PATCH_SIZE]
|
| 173 |
return patch.flatten()
|
| 174 |
|
|
|
|
| 41 |
from mambaeye.positional_encoding import sinusoidal_position_encoding_2d
|
| 42 |
from mamba_ssm.utils.generation import InferenceParams
|
| 43 |
|
|
|
|
| 44 |
PATCH_SIZE = 16
|
| 45 |
CATEGORIES = ResNet50_Weights.IMAGENET1K_V1.meta["categories"]
|
| 46 |
|
|
|
|
| 148 |
def preprocess_image(image_arr):
    """Place a numpy image, centred, onto a square zero-padded tensor canvas.

    The canvas side equals the longer image dimension, so the image is
    never resized — only padded with zeros on the shorter axis.

    Returns:
        (canvas, x_offset, y_offset, height, width) where ``canvas`` is a
        float32 tensor of shape (3, side, side), the offsets locate the
        top-left corner of the pasted image, and height/width are the
        original image dimensions.
    """
    pil_img = Image.fromarray(image_arr).convert("RGB")
    width, height = pil_img.size

    to_tensor = T.ToTensor()
    tensor_img = to_tensor(pil_img)

    # Square canvas sized to the larger dimension; zeros act as padding.
    side = max(width, height)
    canvas = torch.zeros(3, side, side, dtype=torch.float32)

    # Centre the image. NOTE(review): dim 1 is rows (height) and dim 2 is
    # cols (width), despite the x/y naming — kept for interface parity.
    x_offset = (side - tensor_img.shape[1]) // 2
    y_offset = (side - tensor_img.shape[2]) // 2
    canvas[
        :,
        x_offset : x_offset + tensor_img.shape[1],
        y_offset : y_offset + tensor_img.shape[2],
    ] = tensor_img

    return canvas, x_offset, y_offset, height, width
| 161 |
|
| 162 |
def extract_patch(canvas_tensor, px, py):
    """Return a flattened PATCH_SIZE x PATCH_SIZE patch at (px, py).

    Coordinates are clamped so the requested window lies inside the
    (assumed square) canvas, guaranteeing a full-size patch at the edges.
    """
    side = canvas_tensor.shape[1]
    limit = side - PATCH_SIZE

    # Equivalent to max(0, min(coord, limit)) — min first, then the floor
    # at zero, preserving the original clamp order.
    clamped_x = min(px, limit)
    if clamped_x < 0:
        clamped_x = 0
    clamped_y = min(py, limit)
    if clamped_y < 0:
        clamped_y = 0

    window = canvas_tensor[
        :,
        clamped_x : clamped_x + PATCH_SIZE,
        clamped_y : clamped_y + PATCH_SIZE,
    ]
    return window.flatten()
|
| 168 |
|