Sriomdash committed on
Commit
0855c44
·
verified ·
1 Parent(s): fae5cc4

Added Code files

Browse files
Files changed (5) hide show
  1. .gitignore +40 -0
  2. Dockerfile +17 -0
  3. best_fruit_model.h5 +3 -0
  4. main.py +124 -0
  5. requirements.txt +7 -0
.gitignore ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Python bytecode and cache
__pycache__/
*.py[cod]
*$py.class

# Environments
.venv/
venv/
ENV/
env/
# NOTE(review): bin/, lib/, include/ match ANY directory with these names,
# not just stray venv leftovers — scope them (e.g. venv/bin/) if the project
# ever gains top-level dirs with these names.
bin/
lib/
include/

# AI Models & Large Files
# (Usually better to keep models on Google Drive/OneDrive, not Git)
*.h5
*.keras
*.pkl
*.pt
*.pth
*.weights
# best_fruit_model.h5 IS committed (tracked via Git LFS), so un-ignore it;
# otherwise *.h5 above blocks re-adding it without --force.
!best_fruit_model.h5

# Logs and databases
*.log
api.log
*.db
*.sqlite3

# OS related files
.DS_Store
Thumbs.db

# Environment variables (Crucial for security)
.env
.flaskenv

# Editor/IDE specific
.vscode/
.idea/
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

WORKDIR /app

# Install system dependencies for imaging
# NOTE(review): libgl1-mesa-glx is typically needed by OpenCV, not Pillow —
# confirm it is actually required before keeping the extra layer weight.
RUN apt-get update && apt-get install -y \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Copy only the dependency manifest first so the pip layer is cached and
# only re-runs when requirements.txt changes, not on every source edit.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Now copy the application code (main.py, model file, ...)
COPY . .

# Port 7860 is the default for Hugging Face Spaces
EXPOSE 7860

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
best_fruit_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e5a62e15c7b6881e13a557e8e740e6e922a8be6f03e145bfc07a055009342a8
3
+ size 40997712
main.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, File, UploadFile, HTTPException, Request
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ import uvicorn
4
+ from fastapi.responses import JSONResponse
5
+ import numpy as np
6
+ import tensorflow as tf
7
+ from PIL import Image
8
+ import io
9
+ import os
10
+ import slowapi
11
+ from slowapi import Limiter, _rate_limit_exceeded_handler
12
+ from slowapi.util import get_remote_address
13
+ from slowapi.errors import RateLimitExceeded
14
+ import logging
15
+
16
+
17
# Route all INFO-and-above records to api.log, timestamped.
logging.basicConfig(
    level=logging.INFO,
    filename='api.log',
    format='%(asctime)s - %(levelname)s - %(message)s',
)
22
+
23
# root_path="/api" tells the generated OpenAPI docs the public URL prefix —
# presumably a reverse proxy strips /api before forwarding; confirm deployment.
app = FastAPI(
    title="Cloud Inventory AI API",
    description="API for scanning fruits and returning the Fruit Name and Quality.",
    root_path="/api",
)
28
+
29
# Let browser clients on any origin call the API (GET/POST only; no
# cookies/credentials are shared, which is what makes the "*" origin safe).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
    allow_credentials=False,
)
36
+
37
# Rate-limit per client IP; get_remote_address copes with proxied setups
# better than reading request.client.ip directly.
limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter

# Without this handler, hitting a limit raises instead of returning HTTP 429.
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
43
+
44
class CastLayer(tf.keras.layers.Layer):
    """Layer that casts its input tensor to float32.

    Mapped to the saved model's custom 'Cast' object at load time —
    presumably a leftover from mixed-precision training; confirm if the
    model is ever re-exported.
    """

    def call(self, inputs):
        return tf.cast(inputs, dtype=tf.float32)
47
+
48
MODEL_PATH = "best_fruit_model.h5"
model = None

# Load the classifier once at import time; endpoints treat `model is None`
# as "service degraded" rather than crashing at startup.
if not os.path.exists(MODEL_PATH):
    logging.warning(f"Warning: Model not found at {MODEL_PATH}")
else:
    try:
        # Map the saved graph's custom 'Cast' layers onto our CastLayer shim;
        # compile=False because we only run inference, never training.
        model = tf.keras.models.load_model(
            MODEL_PATH, custom_objects={'Cast': CastLayer}, compile=False
        )
        logging.info("Model loaded successfully!")
    except Exception as e:
        logging.error(f"Error loading model: {e}")
61
+
62
# The model's 20 output classes: each fruit in both "fresh" and "rotten"
# form, all fresh labels first — the index order must match training.
_FRUITS = (
    'apple', 'banana', 'cucumber', 'grape', 'guava',
    'mango', 'orange', 'pomegranate', 'strawberry', 'tomato',
)
CLASS_NAMES = [f"{state}_{fruit}" for state in ('fresh', 'rotten') for fruit in _FRUITS]
70
+
71
@app.get("/")
@limiter.limit("40/minute")
async def root_call(request: Request):
    """Liveness message for the API root, pointing users at /docs."""
    caller = request.client.host
    logging.info(f"Root endpoint accessed by {caller}")
    return {"message": "Fruit Quality API is running. Go to /docs to test it."}
76
+
77
@app.get("/health")
@limiter.limit("40/minute")
async def health_call(request: Request):
    """Report whether the classifier is loaded and the API can serve predictions."""
    if model is not None:
        return {"status": "healthy", "model_loaded": True}
    logging.warning("Health check failed: Model not loaded.")
    return {"status": "unhealthy", "reason": "Model missing or failed to load."}
84
+
85
@app.post("/predict")
@limiter.limit("40/minute")
async def predict_image(request: Request, file: UploadFile = File(...)):
    """Classify an uploaded fruit image.

    Returns JSON ``{"fruit": <Name>, "quality": "Fresh"|"Rotten"}``.

    Raises:
        HTTPException 503: model not loaded.
        HTTPException 400: upload is not an image.
        HTTPException 500: any failure while decoding or predicting.
    """
    logging.info(f"Prediction request received from {request.client.host} for file {file.filename}")

    if model is None:
        logging.error("Prediction attempted, but model is not loaded.")
        raise HTTPException(status_code=503, detail="Model is not loaded.")

    # Starlette's UploadFile.content_type is Optional[str]; guard against None
    # before startswith(), otherwise a missing header becomes an AttributeError
    # (HTTP 500) instead of a clean 400.
    if not file.content_type or not file.content_type.startswith("image/"):
        logging.warning(f"Invalid file type uploaded: {file.content_type}")
        raise HTTPException(status_code=400, detail="Invalid file. Upload an image.")

    try:
        contents = await file.read()
        img = Image.open(io.BytesIO(contents)).convert('RGB')

        # Match the model's expected input: 224x224 RGB scaled to [0, 1],
        # with a leading batch dimension.
        img = img.resize((224, 224))
        img_arr = np.expand_dims(np.array(img) / 255.0, axis=0)

        preds = model.predict(img_arr, verbose=0)
        idx = int(np.argmax(preds[0]))
        raw_label = CLASS_NAMES[idx]

        # Labels have the form "<quality>_<fruit>", e.g. "fresh_apple".
        quality, _, fruit = raw_label.partition('_')
        quality = quality.capitalize()
        fruit_name = fruit.title()

        logging.info(f"Prediction successful: {quality} {fruit_name}")

        return JSONResponse(content={
            "fruit": fruit_name,
            "quality": quality
        })

    except Exception as e:
        # logging.exception records the full traceback alongside the message.
        logging.exception(f"Server error during prediction: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Server error: {str(e)}")
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
# Web framework and ASGI server
fastapi
uvicorn
# CPU-only TensorFlow build (smaller install; container has no GPU)
tensorflow-cpu
numpy
pillow
# Needed by FastAPI to parse multipart/form-data (file uploads)
python-multipart
# Per-IP rate limiting
slowapi