Autopixel frapochetti committed on
Commit
6865e91
·
0 Parent(s):

Duplicate from frapochetti/blurry-faces

Browse files

Co-authored-by: Francesco Pochetti <frapochetti@users.noreply.huggingface.co>

.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Blurry Faces
3
+ emoji: 🙈
4
+ colorFrom: pink
5
+ colorTo: blue
6
+ sdk: gradio
7
+ app_file: app.py
8
+ pinned: false
9
+ license: apache-2.0
10
+ duplicated_from: frapochetti/blurry-faces
11
+ ---
12
+
13
+ # Configuration
14
+
15
+ `title`: _string_
16
+ Display title for the Space
17
+
18
+ `emoji`: _string_
19
+ Space emoji (emoji-only character allowed)
20
+
21
+ `colorFrom`: _string_
22
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
23
+
24
+ `colorTo`: _string_
25
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
26
+
27
+ `sdk`: _string_
28
+ Can be either `gradio`, `streamlit`, or `static`
29
+
30
+ `sdk_version` : _string_
31
+ Only applicable for `streamlit` SDK.
32
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
33
+
34
+ `app_file`: _string_
35
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
36
+ Path is relative to the root of the repository.
37
+
38
+ `models`: _List[string]_
39
+ HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
40
+ Will be parsed automatically from your code if not specified here.
41
+
42
+ `datasets`: _List[string]_
43
+ HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
44
+ Will be parsed automatically from your code if not specified here.
45
+
46
+ `pinned`: _boolean_
47
+ Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ from typing import Union, Tuple
4
+ from PIL import Image, ImageOps
5
+ import numpy as np
6
+ import torch
7
+
8
# TorchScript face-segmentation model, loaded once at import time; .eval()
# puts it in inference mode (affects dropout/batch-norm behavior).
model = torch.jit.load('./model/model.pt').eval()
9
+
10
def resize_with_padding(img: Image.Image, expected_size: Tuple[int, int]) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
    """Resize *img* to fit inside *expected_size*, then pad to that exact size.

    ``Image.thumbnail`` shrinks the image IN PLACE while preserving aspect
    ratio; the leftover border is split (near-)evenly between the two sides
    and filled by ``ImageOps.expand``.

    Args:
        img: source image (note: mutated in place by ``thumbnail``).
        expected_size: target ``(width, height)``.

    Returns:
        ``(padded_image, padding)`` where ``padding`` is the
        ``(left, top, right, bottom)`` border added around the resized image.
        (The original return annotation said ``Image.Image`` only, which was
        wrong — the function has always returned this 2-tuple.)
    """
    img.thumbnail((expected_size[0], expected_size[1]))
    delta_width = expected_size[0] - img.size[0]
    delta_height = expected_size[1] - img.size[1]
    pad_width = delta_width // 2
    pad_height = delta_height // 2
    # Right/bottom get the extra pixel when the delta is odd.
    padding = (pad_width, pad_height, delta_width - pad_width, delta_height - pad_height)
    return ImageOps.expand(img, padding), padding
18
+
19
def preprocess_image(img: Image.Image, size: int = 512) -> Tuple[Image.Image, torch.Tensor, Tuple[int, int, int, int]]:
    """Letterbox *img* to ``size``x``size`` and normalize it for the model.

    Applies the standard ImageNet mean/std normalization, then converts from
    HWC to a float NCHW tensor with a batch dimension of 1.

    Args:
        img: input PIL image (mutated in place by ``resize_with_padding``).
        size: square side length the model runs at.

    Returns:
        ``(pil_img, tensor, padding)`` — the padded PIL image, the
        model-ready tensor of shape ``(1, 3, size, size)``, and the
        ``(left, top, right, bottom)`` letterbox padding.
    """
    pil_img, padding = resize_with_padding(img, (size, size))

    # Scale to [0, 1], then subtract/divide the ImageNet channel means/stds.
    img = (np.array(pil_img).astype(np.float32) / 255) - np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3)
    img = img / np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3)
    img = np.transpose(img, (2, 0, 1))  # HWC -> CHW

    return pil_img, torch.tensor(img[None]), padding
27
+
28
def soft_blur_with_mask(image: Image.Image, mask: np.ndarray, padding: Tuple[int, int, int, int]) -> Image.Image:
    """Blend a blurred copy of *image* over the regions selected by *mask*.

    Args:
        image: padded PIL image the model ran on.
        mask: integer face mask from the model (nonzero where a face was
            predicted); resized here to the image resolution.
        padding: ``(left, top, right, bottom)`` letterbox border to crop away.

    Returns:
        The un-padded image with the masked (face) areas softly blurred.
    """
    image = np.array(image)
    # Create a blurred copy of the original image.
    blurred_image = cv2.GaussianBlur(image, (221, 221), sigmaX=20, sigmaY=20)
    image_height, image_width = image.shape[:2]
    mask = cv2.resize(mask.astype(np.uint8), (image_width, image_height), interpolation=cv2.INTER_NEAREST)
    # Blurring the mask itself yields a soft alpha matte with no hard edges.
    mask = cv2.GaussianBlur(mask.astype(np.float32), (11, 11), 10, 10)[:, :, None]

    # Alpha-blend: blurred pixels where the mask is positive, original elsewhere.
    image = (mask * blurred_image + (1.0 - mask) * image)
    # BUGFIX: numpy image arrays are (height, width, channels); the original
    # unpacked the shape as (img_w, img_h, _) — swapped — which only worked
    # because the letterboxed image is square. It also cropped with the
    # left/top padding on both sides, leaving a stray row/column when the
    # padding split was odd. Crop with all four padding components instead.
    pad_l, pad_t, pad_r, pad_b = padding
    img_h, img_w, _ = image.shape
    image = image[pad_t:(img_h - pad_b), pad_l:(img_w - pad_r), :]
    return Image.fromarray(image.astype(np.uint8))
43
+
44
def run(image, size):
    """Gradio callback: segment faces in *image* and return a blurred copy."""
    padded_img, model_input, pad = preprocess_image(image, size=size)

    # Forward pass without autograd bookkeeping; class 0/1 map per pixel.
    with torch.inference_mode():
        logits = model(model_input)
        face_mask = logits.argmax(dim=1).numpy().squeeze()

    return soft_blur_with_mask(padded_img, face_mask, pad)
52
+
53
# --- Gradio UI wiring ------------------------------------------------------
content_image_input = gr.inputs.Image(label="Content Image", type="pil")
model_image_size = gr.inputs.Radio([256, 384, 512, 1024], type="value", default=512, label="Inference size")

# BUGFIX: user-facing typo "groupf" -> "group".
description = "Privacy first! Upload an image of a group of people and blur their faces automatically."
article = """
Demo built on top of a face segmentation model trained from scratch with IceVision on the
<a href='https://github.com/microsoft/FaceSynthetics' target='_blank'>FaceSynthetics</a> dataset.
"""
# Bundled sample images, each paired with a sensible inference size.
examples = [["./images/girls.jpeg", 384], ["./images/kid.jpeg", 256], ["./images/family.jpeg", 512], ["./images/crowd1.jpeg", 1024], ["./images/crowd2.jpeg", 1024]]

app_interface = gr.Interface(fn=run,
                             inputs=[content_image_input, model_image_size],
                             outputs="image",
                             title="Blurry Faces",
                             description=description,
                             examples=examples,
                             article=article)
app_interface.launch()
face_rec_benchmark.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import face_recognition
2
+ import cv2
3
+ import gradio as gr
4
+ from PIL import Image
5
+ import numpy as np
6
+ import time
7
+
8
def run(image):
    """Blur every face the face_recognition CNN detector finds in *image*."""
    image.thumbnail((1280, 1280))  # in-place downscale, max side 1280 px
    pixels = np.array(image)
    boxes = face_recognition.face_locations(pixels, model="cnn")

    # Gaussian-blur each detected box and write it back into the frame.
    for top, right, bottom, left in boxes:
        blurred = cv2.GaussianBlur(pixels[top:bottom, left:right], (99, 99), 30)
        pixels[top:bottom, left:right] = blurred

    return Image.fromarray(pixels)
19
+
20
if __name__ == "__main__":
    # Benchmark: average wall-clock time of run() over 100 iterations.
    # The image is re-opened each pass because run() mutates it in place.
    n_iters = 100
    start = time.time()
    for _ in range(n_iters):
        _ = run(Image.open("./images/crowd.jpeg"))

    print('It took', (time.time()-start)/n_iters, 'seconds.')
images/crowd1.jpeg ADDED
images/crowd2.jpeg ADDED
images/family.jpeg ADDED
images/girls.jpeg ADDED
images/kid.jpeg ADDED
kornia_benchmark.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ from PIL import Image
4
+ import numpy as np
5
+ import torch
6
+ import kornia as K
7
+ from kornia.contrib import FaceDetector, FaceDetectorResult
8
+ import time
9
+
10
# Benchmark is pinned to CPU; the Kornia face detector is built once at
# import time so model construction is excluded from the timed loop.
device = torch.device('cpu')
face_detection = FaceDetector().to(device)
12
+
13
def scale_image(img: np.ndarray, size: int) -> np.ndarray:
    """Resize *img* so its width equals *size*, preserving aspect ratio."""
    height, width = img.shape[:2]
    factor = 1. * size / width
    new_dims = (int(width * factor), int(height * factor))
    return cv2.resize(img, new_dims)
17
+
18
+
19
def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult) -> None:
    """Blur one detected face region of *img_vis* in place.

    Args:
        img: preprocessed NCHW float tensor the detector ran on.
        img_vis: uint8 numpy image that is mutated with the blurred patch.
        det: one Kornia detection carrying the face bounding box.
    """
    # crop the face
    x1, y1 = det.xmin.int(), det.ymin.int()
    x2, y2 = det.xmax.int(), det.ymax.int()
    roi = img[..., y1:y2, x1:x2]
    # A zero-area box would crash the blur below; skip it.
    if roi.shape[-1]==0 or roi.shape[-2]==0:
        return

    # apply blurring and put back to the visualisation image
    roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.))
    # NOTE(review): run() applied bgr_to_rgb to the tensor; this rgb_to_bgr
    # undoes that channel swap before pasting into the (RGB, from PIL)
    # visualisation image — confirm the intended channel order is correct.
    roi = K.color.rgb_to_bgr(roi)
    img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi)
32
+
33
+
34
def run(image):
    """Detect faces in a PIL image and return a copy with the faces blurred.

    Note: ``image.thumbnail`` resizes the input IN PLACE (max side 1280).
    """
    image.thumbnail((1280, 1280))
    img_raw = np.array(image)

    # preprocess: HWC uint8 array -> NCHW float tensor on the benchmark device
    img = K.image_to_tensor(img_raw, keepdim=False).to(device)
    # NOTE(review): the array comes from PIL, i.e. RGB, so bgr_to_rgb actually
    # swaps it to BGR here; apply_blur_face swaps back before pasting, so the
    # two calls cancel for the output — confirm what order the detector expects.
    img = K.color.bgr_to_rgb(img.float())

    with torch.no_grad():
        dets = face_detection(img)
        dets = [FaceDetectorResult(o) for o in dets]

    # Blur into a copy so the raw frame is left untouched.
    img_vis = img_raw.copy()

    for b in dets:
        # Drop low-confidence detections.
        if b.score < 0.5:
            continue

        apply_blur_face(img, img_vis, b)

    return Image.fromarray(img_vis)
55
+
56
if __name__ == "__main__":
    # Benchmark: average wall-clock time of run() over 100 iterations.
    # Re-open the image each pass because run() shrinks it in place.
    n_iters = 100
    start = time.time()
    for _ in range(n_iters):
        _ = run(Image.open("./images/crowd.jpeg"))

    print('It took', (time.time()-start)/n_iters, 'seconds.')
model/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e310f25944aaf0a35a334798e72aca4494dd19f3785225042017743ecd37757
3
+ size 165321408
packages.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ cmake
2
+ ffmpeg
3
+ libsm6
4
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ opencv-python==4.5.5.62
2
+ kornia==0.6.3