ZENLLC committed on
Commit
8eaf800
·
verified ·
1 Parent(s): 05a7cf9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -53
app.py CHANGED
@@ -1,71 +1,79 @@
1
  import gradio as gr
2
- import torch
3
- from transformers import DetrImageProcessor, DetrForObjectDetection, pipeline
4
  from PIL import Image, ImageDraw
5
- import numpy as np
6
-
7
- # Load Object Detection model (DETR)
8
- processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
9
- model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
10
 
11
- # Load Classification pipeline (ViT)
12
- classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
 
 
 
 
13
 
14
def process_analysis(input_img):
    """Run DETR object detection and ViT classification on one image.

    Args:
        input_img: PIL.Image from the Gradio component, or None.

    Returns:
        Tuple of (annotated PIL image or None,
                  classification text for the ViT textbox,
                  detection text for the DETR textbox).
    """
    if input_img is None:
        return None, "No image provided.", "No data."

    # 1. Object detection (DETR).
    inputs = processor(images=input_img, return_tensors="pt")
    outputs = model(**inputs)

    # Convert raw logits/normalized boxes to absolute COCO-style boxes.
    # post_process expects (height, width); PIL's .size is (width, height),
    # hence the [::-1].
    target_sizes = torch.tensor([input_img.size[::-1]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.9
    )[0]

    # BUG FIX: annotate a copy. The original drew on input_img itself,
    # mutating the caller's (Gradio's) input image in place.
    annotated = input_img.copy()
    draw = ImageDraw.Draw(annotated)
    detection_log = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        label_name = model.config.id2label[label.item()]
        draw.rectangle(box, outline="red", width=3)
        draw.text((box[0], box[1]), f"{label_name} {round(score.item(), 3)}", fill="red")
        detection_log.append(
            f"Detected: {label_name} | Confidence: {round(score.item(), 4)} | Box: {box}"
        )

    # 2. Whole-image classification (ViT).
    class_results = classifier(input_img)
    class_text = "\n".join(
        f"{res['label']}: {round(res['score'], 4)}" for res in class_results
    )

    detection_text = "\n".join(detection_log) if detection_log else "No high-confidence objects detected."

    return annotated, class_text, detection_text
43
 
44
# UI Definition
# Two-column layout: source image + trigger button on the left,
# annotated detection output on the right; a second row holds the
# ViT classification and DETR metadata textboxes side by side.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="slate")) as demo:
    gr.Markdown("# 🛡DeepVision Forensic Suite")
    gr.Markdown("**Technical Utility:** Multi-model image analysis for object localization and semantic classification.")

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Source")
            analyze_btn = gr.Button("RUN ANALYSIS", variant="primary")

        with gr.Column():
            # Shows the image with red boxes drawn at the 0.9 DETR threshold.
            output_image = gr.Image(type="pil", label="Object Localization (90% Threshold)")

    with gr.Row():
        with gr.Column():
            gr.Markdown("### Semantic Classification (ViT)")
            class_out = gr.Textbox(label="Top Predictions", lines=5)
        with gr.Column():
            gr.Markdown("### Detection Metadata (DETR)")
            detect_out = gr.Textbox(label="Spatial Coordinates", lines=5)

    # Wire the button to the analysis function: one image in,
    # (annotated image, classification text, detection text) out.
    analyze_btn.click(
        fn=process_analysis,
        inputs=[input_image],
        outputs=[output_image, class_out, detect_out]
    )
70
 
71
  if __name__ == "__main__":
 
1
  import gradio as gr
2
+ from transformers import pipeline
 
3
  from PIL import Image, ImageDraw
4
+ import torch
 
 
 
 
5
 
6
# Initialize the detection pipeline using the DETR architecture.
# This model runs locally within the Space environment.
try:
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
except Exception as exc:
    # FIX: the original discarded the exception silently; surface the
    # reason in the Space logs before degrading to the disabled state.
    print(f"Object-detection model failed to load: {exc}")
    detector = None
12
 
13
def analyze_system(image):
    """Run the global DETR `detector` over *image* and annotate the hits.

    Args:
        image: PIL.Image from the Gradio feed, or None.

    Returns:
        Tuple of (annotated PIL image, telemetry): telemetry is a list of
        per-detection dicts on success, or a status/message dict when the
        input is missing or the model failed to load.
    """
    if image is None:
        return None, {"status": "error", "message": "No input signal detected."}

    if detector is None:
        return image, {"status": "error", "message": "Model initialization failed."}

    # Run inference via the transformers object-detection pipeline.
    detections = detector(image)

    # Annotate a private copy so the caller's image stays untouched.
    canvas = image.copy()
    pen = ImageDraw.Draw(canvas)

    report = []
    for det in detections:
        coords = det["box"]
        xmin = coords["xmin"]
        ymin = coords["ymin"]
        xmax = coords["xmax"]
        ymax = coords["ymax"]

        # Identification border in high-contrast industrial green.
        pen.rectangle([xmin, ymin, xmax, ymax], outline="#00FF00", width=4)

        # One telemetry record per detection.
        report.append({
            "component_class": det["label"],
            "confidence_rating": round(float(det["score"]), 4),
            "spatial_coordinates": {
                "xmin": xmin,
                "ymin": ymin,
                "xmax": xmax,
                "ymax": ymax,
            },
        })

    return canvas, report
53
 
54
# Construct the Gradio Interface with a technical, utility-focused theme.
# Left column: input feed + trigger; right column: annotated image and
# the structured JSON telemetry from analyze_system.
with gr.Blocks(theme=gr.themes.Monochrome(primary_hue="blue")) as demo:
    gr.Markdown("# 🛰Neural Industrial Inspector")
    gr.Markdown("**System Status**: Operational | **Core**: DETR-ResNet-50 Transformer")

    with gr.Row():
        with gr.Column(scale=1):
            input_img = gr.Image(type="pil", label="Optical System Feed")
            run_btn = gr.Button("INITIATE SYSTEM SCAN", variant="primary")

        with gr.Column(scale=1):
            output_img = gr.Image(type="pil", label="Visual Diagnostic Overlay")
            output_data = gr.JSON(label="Structured Telemetry Data")

    # FIX: removed gr.Examples(examples=[], ...) — Gradio rejects an empty
    # examples list at build time, and an empty gallery renders nothing.
    # Re-add gr.Examples with real sample image paths when available.

    # One image in; (annotated image, telemetry) out.
    run_btn.click(
        fn=analyze_system,
        inputs=input_img,
        outputs=[output_img, output_data]
    )
78
 
79
  if __name__ == "__main__":