GERA committed
Commit · 50f5037
Parent(s): f5f1144

first commit
Files changed:
- .gitattributes +2 -0
- 09_pretrained_effnetb2_feature_extractor_20_percent.pth +3 -0
- app.py +65 -0
- examples/2582289.jpg +0 -0
- examples/3622237.jpg +0 -0
- examples/592799.jpg +0 -0
- model.py +22 -0
- requirements.txt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+09_pretrained_effnetb2_feature_extractor_20_percent.pth filter=lfs diff=lfs merge=lfs -text
+*pth filter=lfs diff=lfs merge=lfs -text
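The two added patterns route the new weights file (and anything else matching *pth) through Git LFS. A minimal sketch, not part of the commit, using Python's fnmatch as a rough stand-in for gitattributes glob matching to see which paths the new patterns would catch:

from fnmatch import fnmatch

# Patterns added to .gitattributes above (gitattributes globbing differs
# slightly from fnmatch, so this is only an approximation).
lfs_patterns = [
    "09_pretrained_effnetb2_feature_extractor_20_percent.pth",
    "*pth",
]

for path in ["09_pretrained_effnetb2_feature_extractor_20_percent.pth", "app.py"]:
    tracked = any(fnmatch(path, pattern) for pattern in lfs_patterns)
    print(f"{path} -> {'LFS' if tracked else 'regular git'}")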
09_pretrained_effnetb2_feature_extractor_20_percent.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66cbafff5c12080c4c626f0b6099b8434b092f67990af02a3db861a2e2056406
+size 31299697
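What actually gets committed is not the ~31 MB weights file itself but this three-line Git LFS pointer. A minimal sketch (an assumption for illustration, not part of the commit) of parsing such a pointer into its key/value fields:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space only.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:66cbafff5c12080c4c626f0b6099b8434b092f67990af02a3db861a2e2056406\n"
    "size 31299697\n"
)
print(pointer["size"])  # 31299697 bytes, the size of the real weights file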
app.py
ADDED
@@ -0,0 +1,65 @@
+
+import torch
+import torchvision
+from torch import nn
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+import os
+from model import create_effnetb2
+
+# Getting the class names
+class_names = ["pizza", "steak", "sushi"]
+
+# Verify that we can get the list of examples
+foodvision_mini_examples_path = "examples/"
+example_list = ["examples/" + example for example in os.listdir(foodvision_mini_examples_path)]
+
+# Getting the model and its transforms
+effnetb2_2, effnetb2_transforms_2 = create_effnetb2()
+
+# Load the saved weights
+effnetb2_2.load_state_dict(
+    torch.load(
+        f="09_pretrained_effnetb2_feature_extractor_20_percent.pth",
+        map_location=torch.device("cpu"),
+    )
+)
+
+# Creating the predict function
+def predict(img) -> Tuple[Dict, float]:
+    # Start a timer
+    start_time = timer()
+
+    # Transform the input image for use with EffNetB2
+    transformed_img = effnetb2_transforms_2(img).unsqueeze(0).to("cpu")
+
+    # Put the model in eval mode and make predictions
+    effnetb2_2.eval()
+    with torch.inference_mode():
+        logits = effnetb2_2(transformed_img)
+        pred_probs = torch.softmax(logits, dim=1)
+        pred_class = class_names[torch.argmax(pred_probs, dim=1)]
+
+    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+    # Calculate prediction time
+    end_time = timer()
+    pred_time = round(end_time - start_time, 4)
+
+    # Return the prediction dict and the prediction time
+    return pred_labels_and_probs, pred_time
+
+# Interface zone
+import gradio as gr
+
+# Create title, description and article
+title = "Food Vision Mini 🍕🥩🍣"
+description = "An [EfficientNetB2 Feature Extractor Computer Vision Model](https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html) to classify images as pizza, steak and sushi"
+article = "Created at [09 PyTorch model deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)"
+
+# Creating the Gradio demo
+demo = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil"),
+    outputs=[
+        gr.Label(num_top_classes=3, label="Predictions"),
+        gr.Number(label="Prediction time (s)"),
+    ],
+    examples=example_list,
+    title=title,
+    description=description,
+    article=article,
+)
+demo.launch(debug=False, share=True)  # share=True to avoid errors being shown
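A minimal smoke-test sketch (not part of the commit): with the code above loaded in the same session and the examples/ folder present, predict can be called directly on a PIL image before launching the demo:

from PIL import Image

img = Image.open("examples/592799.jpg")  # one of the bundled example images
labels_and_probs, seconds = predict(img)
print(labels_and_probs)  # e.g. {"pizza": 0.91, "steak": 0.05, "sushi": 0.04}
print(f"prediction took {seconds}s on CPU")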
examples/2582289.jpg
ADDED

examples/3622237.jpg
ADDED

examples/592799.jpg
ADDED
model.py
ADDED
@@ -0,0 +1,22 @@
+
+import torch
+import torchvision
+from torch import nn
+
+def create_effnetb2(seed: int = 42, num_classes: int = 3):
+    # 1, 2, 3. Create the model, weights and transforms
+    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+    transform = weights.transforms()
+    model = torchvision.models.efficientnet_b2(weights=weights)
+
+    # Freeze the base layers
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # 5. Update the classifier head for our model
+    torch.manual_seed(seed)
+    model.classifier = nn.Sequential(
+        nn.Dropout(p=0.3, inplace=True),
+        nn.Linear(in_features=1408, out_features=num_classes),
+    )
+    return model, transform
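A quick sanity-check sketch (an assumption, not in the commit): build the model and push a dummy batch through it to confirm the new head emits one logit per class. 288x288 matches the default crop size of the B2 weights' transforms, though EfficientNet accepts other sizes too:

import torch
from model import create_effnetb2

model, transform = create_effnetb2(num_classes=3)
model.eval()
with torch.inference_mode():
    logits = model(torch.randn(1, 3, 288, 288))  # dummy batch of one image
print(logits.shape)  # torch.Size([1, 3])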
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch==2.0.1
+torchvision==0.15.2
+gradio==3.39.0