Shreyas1441AI commited on
Commit
c9b6800
·
verified ·
1 Parent(s): f61cd7e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import gradio as gr
4
+ import tempfile
5
+ import os
6
+ from tqdm import tqdm
7
+
8
+ # ----------------------------
9
+ # LOAD MODEL (GLOBAL)
10
+ # ----------------------------
11
+ MODEL_PATH = "mosaic.t7" # place model in repo root
12
+ net = cv2.dnn.readNetFromTorch(MODEL_PATH)
13
+
14
+
15
def style_video(input_video):
    """Apply fast neural style transfer to every frame of a video.

    Reads ``input_video`` frame by frame, runs each frame through the
    globally loaded Torch style-transfer network, and writes the styled
    frames to a temporary MP4 file.

    Args:
        input_video: Path to the input video file (as provided by Gradio).

    Returns:
        Path to the temporary ``.mp4`` file containing the styled video.

    Raises:
        RuntimeError: If the input video cannot be opened.
    """
    # ----------------------------
    # OPEN INPUT VIDEO
    # ----------------------------
    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        raise RuntimeError("Could not open video")

    fps = cap.get(cv2.CAP_PROP_FPS)
    # Some containers report 0 (or NaN) FPS; fall back to a sane default
    # so the VideoWriter does not produce a broken file.
    if not fps or fps != fps:
        fps = 25.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # CAP_PROP_FRAME_COUNT is only a hint (it can be 0 or wrong for some
    # codecs), so it drives the progress bar but NOT the read loop.
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # ----------------------------
    # TEMP OUTPUT FILE
    # ----------------------------
    temp_out = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    temp_out.close()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(
        temp_out.name,
        fourcc,
        fps,
        (width, height)
    )

    # ----------------------------
    # PROCESS FRAMES
    # ----------------------------
    progress = tqdm(total=total_frames if total_frames > 0 else None,
                    desc="Styling frames")
    try:
        # Read until the decoder reports end-of-stream rather than trusting
        # the (possibly wrong) frame-count metadata.
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Mean values are the standard ImageNet BGR means used by the
            # fast-neural-style Torch models.
            blob = cv2.dnn.blobFromImage(
                frame,
                1.0,
                (width, height),
                (103.939, 116.779, 123.680),
                swapRB=False,
                crop=False
            )

            net.setInput(blob)
            output = net.forward()

            # Network output is (1, 3, H, W); drop the batch dim and undo
            # the mean subtraction.
            output = output.reshape(3, output.shape[2], output.shape[3])
            output[0] += 103.939
            output[1] += 116.779
            output[2] += 123.680
            output = output.transpose(1, 2, 0)
            output = np.clip(output, 0, 255).astype("uint8")

            # VideoWriter silently drops frames whose size differs from the
            # size it was opened with; guarantee an exact match.
            if output.shape[1] != width or output.shape[0] != height:
                output = cv2.resize(output, (width, height))
            writer.write(output)
            progress.update(1)
    finally:
        # ----------------------------
        # CLEANUP
        # ----------------------------
        # Always release capture/writer handles, even if styling fails
        # mid-way, so the partial output file is at least closed properly.
        progress.close()
        cap.release()
        writer.release()

    return temp_out.name
78
+
79
+
80
# ----------------------------
# GRADIO UI
# ----------------------------
# Build the input/output components first, then wire them into the
# Interface — behavior is identical to passing them inline.
video_input = gr.Video(label="Upload Video")
video_output = gr.Video(label="Styled Video")

app = gr.Interface(
    fn=style_video,
    inputs=video_input,
    outputs=video_output,
    title="Neural Style Transfer on Video",
    description="Applies fast neural style transfer (Torch .t7) frame-by-frame using OpenCV."
)

if __name__ == "__main__":
    app.launch()