-
Notifications
You must be signed in to change notification settings - Fork 0
/
dogcamai_dnn.py
35 lines (27 loc) · 1.21 KB
/
dogcamai_dnn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
from dogcamlogger import DogCamLogger
from dogcamaibase import DogCamAIBase
import numpy as np
import cv2
class DogCamAIDNN(DogCamAIBase):
    """DNN-backed AI implementation that detects objects via OpenCV's dnn module.

    Loads a TensorFlow frozen graph (plus its generated ``.pbtxt`` config,
    derived by appending ``txt`` to the model path) and scans each frame for
    the first sufficiently confident detection matching the configured target.
    """

    # Handle to the OpenCV dnn network (loaded in __init__)
    __net = None

    # Sync time rate with displays using OpenCV
    _fpsSyncCvTime = 1

    def __init__(self, fileLocation: str):
        # "model.pb" + "txt" -> "model.pbtxt": the TF graph config that
        # readNetFromTensorflow expects alongside the frozen model.
        self.__net = cv2.dnn.readNetFromTensorflow(fileLocation, fileLocation + "txt")
        super().__init__()

    def _ProcessImageInternal(self):
        # Feed the current frame through the network as a blob sized to the
        # configured detection dimensions (BGR -> RGB via swapRB).
        blob = cv2.dnn.blobFromImage(
            self._image,
            size=(self._width, self._height), swapRB=True, crop=False)
        self.__net.setInput(blob)
        detections = self.__net.forward()

        self._DrawBoundingBox()

        # Walk the detections and act on the first acceptable match only.
        for detection in detections[0, 0, :, :]:
            classID = int(detection[1])
            confidence = float(detection[2])

            # Skip anything below the confidence floor.
            if confidence <= self._minConfidence:
                continue
            # Skip classes outside the target list (an empty/falsy target
            # list means "accept any class").
            if self._targetID and not (isinstance(self._targetID, list)
                                       and classID in self._targetID):
                continue

            self._LogObjectFound(classID, confidence)

            # Scale the normalized box coords back to pixel space.
            scale = np.array([self._width, self._height, self._width, self._height])
            (left, top, right, bottom) = (detection[3:7] * scale).astype("int")
            self._HandleObjectDetectionResult(left, right, top, bottom)
            break