# live-demo.py
import os
import sys
import argparse
import ast
import cv2
import time
import torch
from vidgear.gears import CamGear
import numpy as np
sys.path.insert(1, os.getcwd())
from SimpleHRNet import SimpleHRNet
from misc.visualization import draw_points, draw_skeleton, draw_points_and_skeleton, joints_dict, check_video_rotation
from misc.utils import find_person_id_associations


def main(camera_id, filename, hrnet_m, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, image_resolution,
         single_person, use_tiny_yolo, disable_tracking, max_batch_size, disable_vidgear, save_video, video_format,
         video_framerate, device):
    # Select the device: use the one given explicitly, otherwise prefer CUDA when available.
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')

    # print(device)

    image_resolution = ast.literal_eval(image_resolution)
    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'
    video_writer = None

    # Open the video source: a video file, a plain OpenCV capture, or a CamGear stream.
    if filename is not None:
        rotation_code = check_video_rotation(filename)
        video = cv2.VideoCapture(filename)
        assert video.isOpened()
    else:
        rotation_code = None
        if disable_vidgear:
            video = cv2.VideoCapture(camera_id)
            assert video.isOpened()
        else:
            video = CamGear(camera_id).start()

    # YOLO person-detector configuration (full or tiny variant).
    if use_tiny_yolo:
        yolo_model_def = "./models/detectors/yolo/config/yolov3-tiny.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3-tiny.weights"
    else:
        yolo_model_def = "./models/detectors/yolo/config/yolov3.cfg"
        yolo_class_path = "./models/detectors/yolo/data/coco.names"
        yolo_weights_path = "./models/detectors/yolo/weights/yolov3.weights"

    model = SimpleHRNet(
        hrnet_c,
        hrnet_j,
        hrnet_weights,
        model_name=hrnet_m,
        resolution=image_resolution,
        multiperson=not single_person,
        return_bounding_boxes=not disable_tracking,
        max_batch_size=max_batch_size,
        yolo_model_def=yolo_model_def,
        yolo_class_path=yolo_class_path,
        yolo_weights_path=yolo_weights_path,
        device=device
    )

    # Tracking state carried across frames.
    if not disable_tracking:
        prev_boxes = None
        prev_pts = None
        prev_person_ids = None
        next_person_id = 0

    while True:
        t = time.time()

        # Grab the next frame (the read API differs between cv2.VideoCapture and CamGear).
        if filename is not None or disable_vidgear:
            ret, frame = video.read()
            if not ret:
                break
            if rotation_code is not None:
                frame = cv2.rotate(frame, rotation_code)
        else:
            frame = video.read()
            if frame is None:
                break

        pts = model.predict(frame)

        if not disable_tracking:
            boxes, pts = pts

        if not disable_tracking:
            if len(pts) > 0:
                if prev_pts is None and prev_person_ids is None:
                    person_ids = np.arange(next_person_id, len(pts) + next_person_id, dtype=np.int32)
                    next_person_id = len(pts) + 1
                else:
                    # Associate current detections with previously seen person ids.
                    boxes, pts, person_ids = find_person_id_associations(
                        boxes=boxes, pts=pts, prev_boxes=prev_boxes, prev_pts=prev_pts,
                        prev_person_ids=prev_person_ids, next_person_id=next_person_id,
                        pose_alpha=0.2, similarity_threshold=0.4, smoothing_alpha=0.1,
                    )
                    next_person_id = max(next_person_id, np.max(person_ids) + 1)
            else:
                person_ids = np.array((), dtype=np.int32)

            prev_boxes = boxes.copy()
            prev_pts = pts.copy()
            prev_person_ids = person_ids

        else:
            person_ids = np.arange(len(pts), dtype=np.int32)

        for i, (pt, pid) in enumerate(zip(pts, person_ids)):
            frame = draw_points_and_skeleton(frame, pt, joints_dict()[hrnet_joints_set]['skeleton'],
                                             person_index=pid, points_color_palette='gist_rainbow',
                                             skeleton_color_palette='jet', points_palette_samples=10)

        fps = 1. / (time.time() - t)
        print('\rframerate: %f fps' % fps, end='')

        if has_display:
            cv2.imshow('frame.png', frame)
            k = cv2.waitKey(1)
            if k == 27:  # Esc button
                if disable_vidgear:
                    video.release()
                else:
                    video.stop()
                break
        else:
            cv2.imwrite('frame.png', frame)

        if save_video:
            if video_writer is None:
                fourcc = cv2.VideoWriter_fourcc(*video_format)  # video format
                video_writer = cv2.VideoWriter('output.avi', fourcc, video_framerate,
                                               (frame.shape[1], frame.shape[0]))
            video_writer.write(frame)

    if save_video:
        video_writer.release()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--camera_id", "-d", help="open the camera with the specified id", type=int, default=0)
    parser.add_argument("--filename", "-f", help="open the specified video (overrides the --camera_id option)",
                        type=str, default=None)
    parser.add_argument("--hrnet_m", "-m", help="network model - 'HRNet' or 'PoseResNet'", type=str, default='HRNet')
    parser.add_argument("--hrnet_c", "-c", help="hrnet parameters - number of channels (if model is HRNet), "
                                                "resnet size (if model is PoseResNet)", type=int, default=48)
    parser.add_argument("--hrnet_j", "-j", help="hrnet parameters - number of joints", type=int, default=17)
    parser.add_argument("--hrnet_weights", "-w", help="hrnet parameters - path to the pretrained weights",
                        type=str, default="./weights/pose_hrnet_w48_384x288.pth")
    parser.add_argument("--hrnet_joints_set",
                        help="use the specified set of joints ('coco' and 'mpii' are currently supported)",
                        type=str, default="coco")
    parser.add_argument("--image_resolution", "-r", help="image resolution", type=str, default='(384, 288)')
    parser.add_argument("--single_person",
                        help="disable the multiperson detection (YOLOv3 or an equivalent detector is required for "
                             "multiperson detection)",
                        action="store_true")
    parser.add_argument("--use_tiny_yolo",
                        help="Use YOLOv3-tiny in place of YOLOv3 (faster person detection). Ignored if --single_person",
                        action="store_true")
    parser.add_argument("--disable_tracking",
                        help="disable the skeleton tracking and temporal smoothing functionality",
                        action="store_true")
    parser.add_argument("--max_batch_size", help="maximum batch size used for inference", type=int, default=16)
    parser.add_argument("--disable_vidgear",
                        help="disable vidgear (which is used for slightly better realtime performance)",
                        action="store_true")  # see https://pypi.org/project/vidgear/
    parser.add_argument("--save_video", help="save output frames into a video.", action="store_true")
    parser.add_argument("--video_format", help="fourcc video format. Common formats: `MJPG`, `XVID`, `X264`. "
                                               "See http://www.fourcc.org/codecs.php", type=str, default='MJPG')
    parser.add_argument("--video_framerate", help="video framerate", type=float, default=30)
    parser.add_argument("--device", help="device to be used (default: cuda, if available). "
                                         "Set to `cuda` to use all available GPUs (default); "
                                         "set to `cuda:IDS` to use one or more specific GPUs "
                                         "(e.g. `cuda:0` `cuda:1,2`); "
                                         "set to `cpu` to run on cpu.", type=str, default=None)
    args = parser.parse_args()

    main(**args.__dict__)
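
# Example invocations (a sketch, not from the original file; the working directory,
# video file name, and weight path are assumptions based on the defaults above):
#   python live-demo.py --camera_id 0
#   python live-demo.py --filename video.mp4 --use_tiny_yolo --save_video --video_format MJPG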