gijl commited on
Commit
e71b745
·
verified ·
1 Parent(s): 121d708

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +46 -0
main.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, File, UploadFile, Form, HTTPException
2
+ from fastapi.responses import HTMLResponse
3
+ from transformers import AutoProcessor, AutoModelForImageTextToText
4
+ import torch
5
+ from PIL import Image
6
+ import io
7
+ import os
8
+
9
# FastAPI application instance; the route handlers below register on it.
app = FastAPI()

# Hugging Face Hub id of the image-text-to-text model to serve.
model_id = "gijl/gemma-4-E4B-it"

# Load the processor and the model once at module import time, so every
# request reuses the same in-memory weights.
print("جاري تحميل النموذج...")  # "Loading the model..." (runtime log string, kept as-is)
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,   # half-precision weights to cut memory use
    low_cpu_mem_usage=True,       # stream weights in rather than a full in-RAM copy
    device_map="auto"             # let accelerate place layers on the available device(s)
)
22
+
23
# Serve the front-end page (index.html) at the site root.
@app.get("/")
async def read_index():
    """Return the contents of index.html as the landing page.

    Raises:
        HTTPException: 404 if index.html is missing next to the app
            (previously this surfaced as an unhandled 500).
    """
    try:
        with open("index.html", "r", encoding="utf-8") as f:
            return HTMLResponse(content=f.read())
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="index.html not found")
28
+
29
# Endpoint that receives an image plus a text prompt and returns generated text.
@app.post("/generate")
async def generate_text(image: UploadFile = File(...), text: str = Form(...)):
    """Run the vision-language model on an uploaded image and a text prompt.

    Args:
        image: Uploaded image file (any format Pillow can decode).
        text: Text prompt conditioning the generation.

    Returns:
        dict: ``{"result": <decoded model output>}``.
        NOTE(review): batch_decode of the full ``generated_ids`` likely
        includes the prompt tokens in the returned string — confirm whether
        the client expects prompt+completion or completion only.

    Raises:
        HTTPException: 400 for an undecodable image upload (client error),
            500 for any other failure during preprocessing or generation.
    """
    image_data = await image.read()

    # Decoding the upload can only fail because of bad client input, so
    # report it as 400 rather than a generic 500.
    try:
        pil_image = Image.open(io.BytesIO(image_data)).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid image file: {e}") from e

    try:
        inputs = processor(text=text, images=pil_image, return_tensors="pt")
        # Move every input tensor onto the device the model was placed on.
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        # Inference only: disable autograd bookkeeping.
        with torch.no_grad():
            generated_ids = model.generate(**inputs, max_new_tokens=100)

        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return {"result": generated_text}
    except Exception as e:
        # Surface model/processor failures to the client as a 500 with detail.
        raise HTTPException(status_code=500, detail=str(e)) from e