Update links and checkpoint path
- README.md +7 -5
- scripts/evaluate_zero_shot.py +1 -1
- scripts/gradio_demo.py +1 -1
README.md
CHANGED
@@ -2,6 +2,8 @@
 
 CropVLM is a CLIP-based zero-shot image classifier adapted for crop and fruit recognition. It compares one image embedding against text embeddings for candidate class names, then returns the class with the highest cosine similarity.
 
+**Links:** [GitHub repository](https://github.com/boudiafA/CropVLM)
+
 
 
 This repository contains:
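The paragraph in this hunk describes the core mechanism: encode the image once, encode every candidate class name, and rank classes by cosine similarity. The sketch below illustrates that scoring with a generic CLIP checkpoint from `transformers` as a stand-in; the model id and the class list are illustrative assumptions, not CropVLM's actual backbone or preprocessing.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Stand-in backbone for illustration only; CropVLM's own weights and
# preprocessing are not part of this diff.
model_id = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_id).eval()
processor = CLIPProcessor.from_pretrained(model_id)

labels = ["cacao", "olive", "cauliflower", "sugarcane"]  # candidate class names
image = Image.open("examples/cacao.png")

with torch.no_grad():
    text_inputs = processor(text=labels, return_tensors="pt", padding=True)
    image_inputs = processor(images=image, return_tensors="pt")
    text_emb = model.get_text_features(**text_inputs)
    image_emb = model.get_image_features(**image_inputs)

# Cosine similarity = dot product of L2-normalized embeddings.
text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)
image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
scores = (image_emb @ text_emb.T).squeeze(0)

print(labels[scores.argmax().item()])  # class with the highest cosine similarity
```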

@@ -58,7 +60,7 @@ pip install -r requirements.txt
 This Hugging Face repository includes the CropVLM checkpoint:
 
 ```text
-models/
+models/CropVLM.pth
 ```
 
 You can download it with `huggingface_hub`:

@@ -68,7 +70,7 @@ from huggingface_hub import hf_hub_download
 
 checkpoint = hf_hub_download(
     repo_id="boudiafA/CropVLM",
-    filename="models/
+    filename="models/CropVLM.pth",
 )
 ```
 
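Since `hf_hub_download` returns the local path of the fetched file, the download call in this hunk and the `load_cropvlm` usage shown later in the README compose naturally. A short sketch, assuming `load_cropvlm` accepts any local checkpoint path:

```python
from huggingface_hub import hf_hub_download
from cropvlm import load_cropvlm

# hf_hub_download returns the path of the cached file it just fetched.
checkpoint = hf_hub_download(repo_id="boudiafA/CropVLM", filename="models/CropVLM.pth")
classifier = load_cropvlm(checkpoint)  # assumption: any local path works, not only models/CropVLM.pth
```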

@@ -80,7 +82,7 @@ Run:
 
 ```bash
 python scripts/gradio_demo.py \
-    --checkpoint models/
+    --checkpoint models/CropVLM.pth
 ```
 
 Then open:
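The command in this hunk starts the bundled demo. As a rough picture of what such a demo does (a simplified illustration, not the contents of `scripts/gradio_demo.py`, which also exposes `--device`, `--prompt-template`, and `--top-k`):

```python
import gradio as gr
from cropvlm import load_cropvlm

classifier = load_cropvlm("models/CropVLM.pth")

def classify(image):
    # predict() yields (label, score) pairs, per the README usage example.
    return dict(classifier.predict(image, top_k=5))

demo = gr.Interface(fn=classify, inputs=gr.Image(type="pil"), outputs=gr.Label(num_top_classes=5))
demo.launch()  # Gradio serves on http://127.0.0.1:7860 by default
```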

@@ -107,7 +109,7 @@ The included examples are `cacao`, `olive`, `cauliflower`, `sugarcane`, and `sun
 from PIL import Image
 from cropvlm import load_cropvlm
 
-classifier = load_cropvlm("models/
+classifier = load_cropvlm("models/CropVLM.pth")
 image = Image.open("examples/cacao.png")
 
 for label, score in classifier.predict(image, top_k=5):
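The hunk stops at the `for` header; the loop body is not part of this change. A plausible completion (the print format is an assumption) is simply:

```python
for label, score in classifier.predict(image, top_k=5):
    # score is the similarity assigned to that label; its exact scale depends on the implementation
    print(f"{label}\t{score:.4f}")
```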

@@ -132,7 +134,7 @@ Run CropVLM and the supported comparison CLIP models:
 ```bash
 python scripts/evaluate_zero_shot.py \
     --dataset /mnt/e/Desktop/Datasets/FruitDataset/Crop_Dataset_testing \
-    --cropvlm-checkpoint models/
+    --cropvlm-checkpoint models/CropVLM.pth \
     --output outputs/zero_shot_results.json \
     --batch-size 64
 ```
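The `--dataset` flag expects an ImageFolder-style root (one subdirectory per class), per the argparse help in `scripts/evaluate_zero_shot.py` below. A layout along these lines works; the directory and file names here are only illustrative:

```text
Crop_Dataset_testing/
├── cacao/
│   ├── 0001.jpg
│   └── 0002.jpg
├── olive/
│   └── ...
└── sugarcane/
    └── ...
```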
scripts/evaluate_zero_shot.py
CHANGED

@@ -336,7 +336,7 @@ def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("--dataset", required=True, help="ImageFolder-style dataset root.")
     parser.add_argument("--output", default="outputs/zero_shot_results.json")
-    parser.add_argument("--cropvlm-checkpoint", default="models/
+    parser.add_argument("--cropvlm-checkpoint", default="models/CropVLM.pth")
     parser.add_argument("--models", nargs="+", default=DEFAULT_MODELS)
     parser.add_argument("--device", default=None)
     parser.add_argument("--batch-size", type=int, default=64)
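For intuition about what a zero-shot evaluation over such a dataset involves, here is a minimal top-1 accuracy loop. It is an illustration only, not the script's actual implementation: the real `evaluate_zero_shot.py` also benchmarks the comparison CLIP models in `DEFAULT_MODELS` and honors `--batch-size` and `--device`, which this sketch ignores.

```python
import json
from pathlib import Path
from PIL import Image
from cropvlm import load_cropvlm

dataset_root = Path("/mnt/e/Desktop/Datasets/FruitDataset/Crop_Dataset_testing")
classifier = load_cropvlm("models/CropVLM.pth")

correct = total = 0
for class_dir in sorted(p for p in dataset_root.iterdir() if p.is_dir()):
    for image_path in sorted(class_dir.iterdir()):
        image = Image.open(image_path).convert("RGB")
        # Assumption: predict() returns a list of (label, score) pairs, best first.
        top_label, _ = classifier.predict(image, top_k=1)[0]
        correct += int(top_label == class_dir.name)
        total += 1

results = {"cropvlm_top1_accuracy": correct / total if total else 0.0}
Path("outputs").mkdir(exist_ok=True)
Path("outputs/zero_shot_results.json").write_text(json.dumps(results, indent=2))
print(results)
```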
scripts/gradio_demo.py
CHANGED

@@ -99,7 +99,7 @@ def build_demo(checkpoint: str, device: str | None, prompt_template: str, top_k:
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("--checkpoint", default="models/
+    parser.add_argument("--checkpoint", default="models/CropVLM.pth")
     parser.add_argument("--device", default=None)
     parser.add_argument("--prompt-template", default="{}")
     parser.add_argument("--top-k", type=int, default=5)
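All four options in this parser can be combined on one command line, for example (the `cuda` value for `--device` and the prompt template are assumptions about sensible inputs, not documented defaults):

```bash
python scripts/gradio_demo.py \
    --checkpoint models/CropVLM.pth \
    --device cuda \
    --prompt-template "a photo of a {}" \
    --top-k 3
```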