Upload 2 files
Browse files- config.py +13 -0
- cropper-2.py +89 -0
config.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# config.py
# Shared path/constant configuration for the dataset-prep scripts.

# Variables for renamer.py
# Root folder renamer.py operates on.
BASE_DIR = r"E:\studio Dropbox\studio\projects\TheFold\3models\SachaQuenby\1prep\2clean"
# Framing buckets (numeric prefix fixes the sort order).
CROP_CATEGORY = ["1closeup", "2mid", "3full"]
# Styling buckets, continuing the same numbering scheme.
STYLE_CATEGORIES = ["4fashion", "5natural", "6beauty", "7catwalk"]

# Variables for cropper.py
# Folder scanned for source images.
INPUT_FOLDER = r"E:\studio Dropbox\studio\projects\TheFold\3models\SachaQuenby\1prep\2crop"
# Destination root; cropper creates one subfolder per aspect ratio here.
OUTPUT_FOLDER = r"E:\studio Dropbox\studio\projects\TheFold\3models\SachaQuenby\1prep\2crop\cropped"
# NOTE(review): placeholder value — set a real path before enabling
# SAVE_TO_YOLO_DETECTED_FOLDER below.
YOLO_DETECTED_FOLDER = r"path"
# (width, height) pairs the cropper produces for every detected person.
ASPECT_RATIOS = [(1024, 1280), (1280, 1024), (1024, 1536), (1536, 1024), (1024, 1024)]
# Feature flag for mirroring detections into YOLO_DETECTED_FOLDER.
SAVE_TO_YOLO_DETECTED_FOLDER = False
|
cropper-2.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
import cv2
import os
import threading
from PIL import Image  # NOTE(review): imported but unused in this file — confirm before removing
import sys
# Make the local "notes" libs importable (machine-specific path).
sys.path.insert(0, 'E:/studio Dropbox/studio/ai/libs/notes')

# Sanity check: show which interpreter is actually running.
print(sys.executable)

# Import variables from config.py
from dataset_prep.config import INPUT_FOLDER, OUTPUT_FOLDER, YOLO_DETECTED_FOLDER, ASPECT_RATIOS, SAVE_TO_YOLO_DETECTED_FOLDER

# Define the locks
# counter_lock: serializes the model load in process_files;
# model_lock: serializes inference calls on the shared model.
counter_lock = threading.Lock()
model_lock = threading.Lock()
|
| 17 |
+
|
| 18 |
+
def resize_bbox_to_dimensions(bbox, target_width, target_height, img_width, img_height):
    """Grow *bbox* to match the ``target_width:target_height`` aspect ratio.

    The box is expanded symmetrically (never shrunk) along whichever axis is
    too small, then moved/clipped so it lies inside the image.

    Args:
        bbox: ``[x1, y1, x2, y2]`` in pixels.
        target_width / target_height: desired output dimensions; only their
            ratio matters here.
        img_width / img_height: bounds of the source image.

    Returns:
        ``[x1, y1, x2, y2]`` as ints, inside the image. The ratio is exact
        whenever a box of that ratio fits in the image at the required size.
    """
    x1, y1, x2, y2 = bbox
    current_width = x2 - x1
    current_height = y2 - y1
    desired_aspect_ratio = target_width / target_height
    current_aspect_ratio = current_width / current_height

    print(f"Original bbox: {bbox}")
    print(f"Current aspect ratio: {current_aspect_ratio}")
    print(f"Desired aspect ratio: {desired_aspect_ratio}")

    if current_aspect_ratio < desired_aspect_ratio:
        # Too narrow: widen symmetrically to hit the desired ratio.
        new_width = desired_aspect_ratio * current_height
        x1 -= (new_width - current_width) / 2
        x2 += (new_width - current_width) / 2
    elif current_aspect_ratio > desired_aspect_ratio:
        # Too wide: grow the height symmetrically instead.
        new_height = current_width / desired_aspect_ratio
        y1 -= (new_height - current_height) / 2
        y2 += (new_height - current_height) / 2

    # Bug fix: the original clamped straight to the image bounds, which
    # silently destroyed the requested aspect ratio whenever the expanded
    # box poked outside the image. Shift the box back inside first so the
    # ratio survives whenever the box fits at all.
    if x1 < 0:
        x2 -= x1
        x1 = 0
    if x2 > img_width:
        x1 -= x2 - img_width
        x2 = img_width
    if y1 < 0:
        y2 -= y1
        y1 = 0
    if y2 > img_height:
        y1 -= y2 - img_height
        y2 = img_height

    # Final clamp only matters when the box is larger than the image itself.
    x1 = max(x1, 0)
    y1 = max(y1, 0)
    x2 = min(x2, img_width)
    y2 = min(y2, img_height)

    new_bbox = [int(x1), int(y1), int(x2), int(y2)]
    print(f"New bbox: {new_bbox}")

    return new_bbox
|
| 47 |
+
|
| 48 |
+
def process_files(filelist):
    """Run YOLOv7 person detection on each file and save aspect-ratio crops.

    For every image in *filelist* (names relative to ``INPUT_FOLDER``), the
    first detected person box is expanded to each ratio in ``ASPECT_RATIOS``
    and the crop is written to ``OUTPUT_FOLDER/<w>_<h>/cropped_<filename>``.
    Per-file failures are logged and skipped so one bad image doesn't abort
    the batch.
    """
    # Serialize the (expensive) model load across worker threads.
    # NOTE(review): this uses counter_lock while inference uses model_lock —
    # looks intentional (load vs. use) but worth confirming.
    with counter_lock:
        model = torch.hub.load('WongKinYiu/yolov7', 'custom', 'yolov7-e6e.pt', force_reload=False, trust_repo=True)
    for filename in filelist:
        try:
            img_path = os.path.join(INPUT_FOLDER, filename)
            image = cv2.imread(img_path)
            if image is None:
                # Fix: name the offending file (message previously held a
                # broken placeholder).
                raise ValueError(f"Could not read image {filename}")
            img_width, img_height = image.shape[1], image.shape[0]

            # The shared model is not thread-safe; one inference at a time.
            with model_lock:
                results = model(img_path)
                detections = results.pandas().xyxy[0]

            person_detected = detections[detections['name'] == 'person']
            print(f"Person detected: {not person_detected.empty}")
            if not person_detected.empty:
                # Use only the first (highest-confidence) person box.
                x1, y1, x2, y2 = person_detected.iloc[0][['xmin', 'ymin', 'xmax', 'ymax']].astype(int)

                for target_width, target_height in ASPECT_RATIOS:
                    new_x1, new_y1, new_x2, new_y2 = resize_bbox_to_dimensions([x1, y1, x2, y2], target_width, target_height, img_width, img_height)
                    new_x1, new_y1 = max(new_x1, 0), max(new_y1, 0)
                    new_x2, new_y2 = min(new_x2, img_width), min(new_y2, img_height)
                    cropped_img = image[new_y1:new_y2, new_x1:new_x2]

                    # Create a folder for each aspect ratio if it doesn't exist
                    aspect_ratio_folder = f"{target_width}_{target_height}"
                    aspect_ratio_path = os.path.join(OUTPUT_FOLDER, aspect_ratio_folder)
                    os.makedirs(aspect_ratio_path, exist_ok=True)

                    # Fix: embed the source filename in the output name — the
                    # previous constant name made every image overwrite the
                    # last crop in each ratio folder.
                    output_filename = os.path.join(aspect_ratio_path, f"cropped_{filename}")
                    cv2.imwrite(output_filename, cropped_img)

        except Exception as e:
            # Best-effort batch processing: report and continue with the next file.
            print(f"An error occurred while processing file {filename}: {e}")
|
| 86 |
+
|
| 87 |
+
if __name__ == "__main__":
    # Entry point: hand every entry of the input folder to the processor.
    process_files(os.listdir(INPUT_FOLDER))
|