# autoblurface.py — automatic face detection and blurring for video streams.
import os
from ultralytics import YOLO
import cv2
import time
class AutoBlurFace:
    """
    Detects faces in a video file or webcam stream using a YOLO face model
    and optionally blurs them, displays the result, and/or writes the
    processed frames to an output video file.
    """

    def __init__(self, model_path, show_video=False, save_video=False, apply_blur=False, enlargement_factor=10,
                 output_video_path='output.mp4'):
        """
        Initializes the AutoBlurFace class with the specified parameters.

        Parameters:
            model_path (str): Path to the YOLO model file.
            show_video (bool): Flag to display the video during processing.
            save_video (bool): Flag to save the processed video.
            apply_blur (bool): Flag to apply blurring effect on detected faces.
            enlargement_factor (int): Percentage by which each detected face
                box is enlarged on every side before blurring (10 == 10%).
            output_video_path (str): Path for saving the output video.

        Raises:
            RuntimeError: If the YOLO model cannot be loaded.
        """
        self.model = self._load_model(model_path)
        self.output_video_path = output_video_path
        self.enlargement_factor = enlargement_factor
        self.show_video = show_video
        self.save_video = save_video
        self.apply_blur = apply_blur
        self.video_writer = None

    def _load_model(self, model_path):
        """
        Loads the YOLO model from the specified path.

        Parameters:
            model_path (str): Path to the YOLO model file.

        Returns:
            YOLO: Loaded YOLO model.

        Raises:
            RuntimeError: If loading fails. (Failing fast here is clearer
                than returning None, which previously caused a confusing
                TypeError on the first call to self.model(frame).)
        """
        try:
            model = YOLO(model_path)
            print('YoloFaceModel loaded successfully.')
            return model
        except Exception as e:
            raise RuntimeError(f'Failed to load the model: {e}') from e

    def _apply_blur_to_faces(self, frame, detections):
        """
        Applies blurring to areas around the detected faces, handling image edges.

        Parameters:
            frame (numpy.ndarray): The current video frame.
            detections: Detected objects in the frame.

        Returns:
            numpy.ndarray: The frame with blurred faces.
        """
        height, width = frame.shape[:2]
        for detection in detections:
            for xyxy in detection.boxes.xyxy:
                x1, y1, x2, y2 = map(int, xyxy)
                # Grow the box by enlargement_factor percent on each side.
                width_enlargement = int((x2 - x1) * self.enlargement_factor / 100)
                height_enlargement = int((y2 - y1) * self.enlargement_factor / 100)
                # Clamp to the frame so slicing never goes out of bounds.
                x1 = max(0, x1 - width_enlargement)
                y1 = max(0, y1 - height_enlargement)
                x2 = min(width, x2 + width_enlargement)
                y2 = min(height, y2 + height_enlargement)
                # Skip degenerate boxes: GaussianBlur raises on an empty region.
                if x2 <= x1 or y2 <= y1:
                    continue
                face_region = frame[y1:y2, x1:x2]
                blurred_face = cv2.GaussianBlur(face_region, (99, 99), 30)
                frame[y1:y2, x1:x2] = blurred_face
        return frame

    def _process_frame(self, frame):
        """
        Processes a single frame to detect faces and apply blurring.

        Parameters:
            frame (numpy.ndarray): The current video frame.

        Returns:
            numpy.ndarray: The processed frame.
        """
        detections = self.model(frame)
        if self.apply_blur:
            frame = self._apply_blur_to_faces(frame, detections)
        return frame

    def run(self, video_path, use_webcam=False):
        """
        Runs the face detection and blurring process on a video file or webcam feed.

        Parameters:
            video_path (str): Path to the input video file (ignored when
                use_webcam is True).
            use_webcam (bool): Flag to use webcam as input instead of a file.

        Raises:
            Exception: If the video file or webcam cannot be opened.
        """
        if use_webcam:
            video = cv2.VideoCapture(0)
        else:
            video = cv2.VideoCapture(video_path)
        if not video.isOpened():
            raise Exception('Failed to load the video or webcam.')
        if self.save_video:
            # Webcams frequently report 0 FPS; fall back to a sane default
            # so the VideoWriter is not created with an invalid frame rate.
            frame_rate = int(video.get(cv2.CAP_PROP_FPS)) or 30
            width, height = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # 'mp4v' (MPEG-4) is available in default OpenCV builds; the
            # previous 'H264' FOURCC often fails silently, leaving an
            # empty output file.
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            self.video_writer = cv2.VideoWriter(self.output_video_path, fourcc, frame_rate, (width, height))
        start_time = time.time()
        frame_count = 0
        while video.isOpened():
            ret, frame = video.read()
            if not ret:
                break
            processed_frame = self._process_frame(frame)
            if self.show_video:
                cv2.imshow('Processed Frame', cv2.resize(processed_frame, (1280, 720)))
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            if self.save_video:
                self.video_writer.write(processed_frame)
            frame_count += 1
        elapsed_time = time.time() - start_time
        print(
            f'Processed {frame_count} frames in {int(elapsed_time // 60)} minutes and {int(elapsed_time % 60)} seconds.')
        if self.save_video and self.video_writer is not None:
            self.video_writer.release()
        video.release()
        cv2.destroyAllWindows()
# Usage example — guarded so importing this module does not start processing.
if __name__ == '__main__':
    auto_blur_face = AutoBlurFace('yolov8n-face.pt', output_video_path='face-video-blurred.mp4',
                                  show_video=False, save_video=True, apply_blur=True)
    auto_blur_face.run('face-video.mp4', use_webcam=False)