-
Notifications
You must be signed in to change notification settings - Fork 0
/
redgreenyolovfiveled.py
167 lines (136 loc) · 5.64 KB
/
redgreenyolovfiveled.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
"""Webcam object detection with YOLOv5 on a Raspberry Pi.

Detections are logged to a CSV file and cropped to disk. Two LEDs signal
what was seen in the current frame:

    red LED   -> at least one person detected
    green LED -> a cat detected (and no person)
    both off  -> neither class present

Press Esc in the preview window to exit.
"""
import torch
import numpy as np
import cv2
import csv
import os
import time
from torchvision.ops.boxes import nms
import RPi.GPIO as GPIO

# --- GPIO setup -------------------------------------------------------------
red_led_pin = 17
green_led_pin = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(red_led_pin, GPIO.OUT)
GPIO.setup(green_led_pin, GPIO.OUT)

# --- Model setup ------------------------------------------------------------
weights = "yolov5s.pt"
model = torch.hub.load('ultralytics/yolov5', 'custom', path=weights)
data = "data/coco128.yaml"
model.yaml = data
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device).eval()

# --- Video capture ----------------------------------------------------------
cap = cv2.VideoCapture(0)  # Use webcam (change the index if you have multiple cameras)
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# --- CSV output -------------------------------------------------------------
csv_file = 'detection_results.csv'
csv_fields = ['timestamp', 'class', 'confidence', 'x', 'y', 'width', 'height']
# newline='' is required by the csv module; without it Windows gets blank rows.
csv_output = open(csv_file, 'w', newline='')
csv_writer = csv.DictWriter(csv_output, fieldnames=csv_fields)
csv_writer.writeheader()

# --- Crop output folder -----------------------------------------------------
output_folder = 'detection_crops'
# FIX: the folder was never created, so every cv2.imwrite silently failed.
os.makedirs(output_folder, exist_ok=True)

# Class labels exposed by the loaded model (index -> name).
class_labels = model.names


def _process_detections(frame, detections, box_color):
    """Apply NMS to one class's detections, then log, crop and draw each kept box.

    Side effects: appends one CSV row per kept detection, writes a cropped
    JPEG per detection into `output_folder`, and draws the bounding box and
    label onto `frame` in place.

    Args:
        frame: BGR image (numpy array) from the capture device.
        detections: pandas DataFrame slice from results.pandas().xyxy[0]
            with columns xmin/ymin/xmax/ymax/confidence/name.
        box_color: BGR tuple used for the rectangle outline.
    """
    boxes = detections[['xmin', 'ymin', 'xmax', 'ymax']].values.astype(np.float32)
    scores = detections['confidence'].values.astype(np.float32)
    keep_indices = nms(torch.tensor(boxes), torch.tensor(scores), iou_threshold=0.5)
    keep_indices = keep_indices.cpu().numpy().astype(np.int32)

    # Human-readable stamp for the CSV; filesystem-safe stamp for filenames.
    # FIX: the old "%I:%M %p" filename contained ':' and a space (invalid on
    # Windows) and collided for every detection within the same minute.
    timestamp = time.strftime("%I:%M %p")
    file_stamp = time.strftime("%Y%m%d_%H%M%S")

    for idx in keep_indices:
        detection = detections.iloc[idx]
        class_label = detection['name']
        confidence = detection['confidence']
        x = detection['xmin']
        y = detection['ymin']
        w = detection['xmax'] - detection['xmin']
        h = detection['ymax'] - detection['ymin']

        csv_writer.writerow({
            'timestamp': timestamp,
            'class': class_label,
            'confidence': confidence,
            'x': x,
            'y': y,
            'width': w,
            'height': h,
        })

        # Crop the detection; skip degenerate (zero-area) boxes.
        crop = frame[int(y):int(y + h), int(x):int(x + w)]
        if crop.size:
            # idx in the name prevents same-frame detections overwriting each other.
            cv2.imwrite(f"{output_folder}/{class_label}_{file_stamp}_{idx}.jpg", crop)

        # Annotate the preview frame.
        cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), box_color, 2)
        cv2.putText(frame, f'{class_label}: {confidence:.2f}', (int(x), int(y) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)


# --- Detection loop ---------------------------------------------------------
# try/finally guarantees the camera, window, CSV handle and GPIO pins are
# released even if the loop raises.
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        results = model(frame)
        detections = results.pandas().xyxy[0]

        # FIX: the original used `name != 'bench'` here, which matched every
        # non-bench class (cats included) — the cat/green-LED branch below was
        # unreachable whenever a cat was in frame. Select persons explicitly.
        person_detections = detections[detections['name'] == 'person']
        cat_detections = detections[detections['name'] == 'cat']

        if len(person_detections) > 0:
            _process_detections(frame, person_detections, (255, 0, 0))
            # Person present: red on, green off.
            GPIO.output(red_led_pin, GPIO.HIGH)
            GPIO.output(green_led_pin, GPIO.LOW)
        elif len(cat_detections) > 0:
            _process_detections(frame, cat_detections, (0, 255, 0))
            # Cat (no person): green on, red off.
            GPIO.output(red_led_pin, GPIO.LOW)
            GPIO.output(green_led_pin, GPIO.HIGH)
        else:
            # Nothing of interest: both LEDs off.
            GPIO.output(red_led_pin, GPIO.LOW)
            GPIO.output(green_led_pin, GPIO.LOW)

        cv2.imshow('Object Detection', frame)
        if cv2.waitKey(100) == 27:  # Press Esc to exit
            break
finally:
    # Release resources even on error.
    cap.release()
    cv2.destroyAllWindows()
    csv_output.close()
    GPIO.cleanup()