-
Notifications
You must be signed in to change notification settings - Fork 2
/
miniview.py
144 lines (123 loc) · 6.14 KB
/
miniview.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
import os
import cv2
import numpy as np
import subprocess
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtGui import QPen, QBrush
from PyQt5.Qt import Qt
from PyQt5 import QtCore
class Miniview():
    """Builds QGraphicsScene content for a video mini-view: a horizontal
    strip of thumbnails plus a timeline bar highlighting anomalous intervals.
    """

    # Drawable timeline-bar geometry in pixels.  The UI's timeline
    # container is 894 px wide; the painted bar itself is 880 px.
    BAR_WIDTH = 880
    BAR_HEIGHT = 18

    def __init__(self):
        # One scene shared by draw_miniview() and draw_anomalous_time().
        self.scene = QGraphicsScene()

    def draw_anomalous_time(self, time_frame, total_frames):
        """Paint a timeline bar into the scene: red spans mark anomalous
        frame intervals, white spans mark normal footage.

        time_frame   -- array reshaped to (-1, 2) of [start, end] frame
                        pairs; assumed sorted and non-overlapping.
        total_frames -- total frame count of the video, used to scale
                        frame numbers to bar pixels.
        Returns the QGraphicsScene with the bar item added.
        """
        time_frame = time_frame.reshape(-1, 2)
        w, h = self.BAR_WIDTH, self.BAR_HEIGHT
        mat = np.zeros((h, w, 3), np.uint8)
        for i, interval in enumerate(time_frame):
            # Scale the interval's frame numbers to bar pixel columns.
            # (De-duplicated: this was copied verbatim in both branches.)
            start_px = int(w * interval[0] / total_frames)
            end_px = int(w * interval[1] / total_frames)
            # Red stored as BGR [0,0,255]; the QImage is rgbSwapped() below.
            mat[:, start_px:end_px + 1, 2] = 255
            if i == 0:
                if interval[0] > 0:
                    # White lead-in before the first anomaly.
                    # (Fixed off-by-one: original left column start_px-1 black.)
                    mat[:, 0:start_px, :] = 255
            else:
                # White gap between the previous interval and this one.
                # (Fixed off-by-one: fill up to start_px-1 inclusive.)
                prev_end_px = int(w * time_frame[i - 1, 1] / total_frames)
                mat[:, prev_end_px + 1:start_px, :] = 255
            if i == len(time_frame) - 1 and end_px < w:
                # White tail after the last anomaly.  (Fixed: the original
                # compared the pixel coordinate end_px against total_frames,
                # i.e. mixed units; the bar is w pixels wide.)
                mat[:, end_px + 1:w, :] = 255
        img = QImage(mat, mat.shape[1], mat.shape[0],
                     QImage.Format_RGB888).rgbSwapped()
        self.scene.addItem(QGraphicsPixmapItem(QPixmap.fromImage(img)))
        return self.scene

    def merge_frame(self, curr_fileName, npy_merge, interval=500):
        """Merge nearby anomalous [start, end] frame pairs into incidents.

        curr_fileName -- path to the video file (queried with ffprobe for fps).
        npy_merge     -- (N, 2) array of [start_frame, end_frame] rows,
                         assumed sorted by start frame.
        interval      -- base gap in frames; a detection whose start is more
                         than interval (+ accumulated slack) past the current
                         incident's start opens a new incident.
        Returns (fps, final_time) where final_time is an (M, 2) ndarray of
        merged incidents.
        """
        # Query the stream frame rate, e.g. "30000/1001".
        # (subprocess.run replaces Popen + wait() + communicate(): communicate
        # already waits, so wait() was redundant.)
        result = subprocess.run(
            ['ffprobe', '-v', 'error', '-select_streams', 'v',
             '-of', 'default=noprint_wrappers=1:nokey=1',
             '-show_entries', 'stream=r_frame_rate', curr_fileName],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        rate = result.stdout.decode().split('\n')[0]
        num, den = rate.split('/')
        fps = int(num) / int(den)
        print('fps: ', fps)
        # Initial incident = the first detection.
        final_time = []
        start_frame = npy_merge[0, 0]
        end_frame = npy_merge[0, 1]
        added_frame = 0  # slack that grows as consecutive detections chain up
        for i in range(1, len(npy_merge)):
            if (npy_merge[i, 0] - start_frame) > (interval + added_frame):
                # Start frames differ by more than the (slack-widened)
                # interval: close the current incident, open a new one.
                final_time.append([start_frame, end_frame])
                start_frame = npy_merge[i, 0]
                end_frame = npy_merge[i, 1]
                added_frame = 0
            elif npy_merge[i, 1] > end_frame:
                # Extend the current incident to the most inclusive end and
                # widen the merge window so consecutive events stay chained.
                end_frame = npy_merge[i, 1]
                added_frame += npy_merge[i, 0] - npy_merge[i - 1, 0]
            # (Fixed log label: the last field is the incident list, which the
            # original misleadingly printed under "total_frame".)
            print('start frame: {}, end frame: {}, added_frame: {}, incidents: {}'.format(
                start_frame, end_frame, added_frame, final_time))
        final_time.append([start_frame, end_frame])
        return fps, np.array(final_time)

    def draw_miniview(self, dir):
        """Render the 11-thumbnail strip for the video at `dir` into the
        scene and return the scene."""
        image = self.show_miniview(dir)
        img = QImage(image.data, image.shape[1], image.shape[0],
                     QImage.Format_RGB888).rgbSwapped()
        self.scene.addItem(QGraphicsPixmapItem(QPixmap.fromImage(img)))
        return self.scene

    def cal_timeline(self, dir):
        """Return five tick labels in seconds spanning the video at `dir`:
        0%, 25%, 50%, 75% and 100% of its duration."""
        cap = cv2.VideoCapture(dir)
        try:
            total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            frame_rate = cap.get(cv2.CAP_PROP_FPS)
        finally:
            cap.release()  # fixed: the capture was never released
        step = int(total_frames / 4)
        time_list = [int(i * step / frame_rate) for i in range(4)]
        time_list.append(int(total_frames / frame_rate))
        return time_list

    def show_miniview(self, dir):
        """Sample 11 evenly spaced frames from the video at `dir`, resize
        each to 80x80 and return them concatenated horizontally (BGR)."""
        cap = cv2.VideoCapture(dir)
        try:
            total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            frame_rate = cap.get(cv2.CAP_PROP_FPS)
            print('total frames:{}, frame_rate :{} '.format(total_frames, frame_rate))
            print('total length:{} '.format(int(total_frames / frame_rate)))
            # 11 thumbnails of 80x80 -> an 880x80 strip matching BAR_WIDTH.
            step = int(total_frames / 11)
            thumbs = []
            for i in range(11):
                cap.set(cv2.CAP_PROP_POS_FRAMES, i * step)
                ret, frame = cap.read()
                assert ret  # every sample point must yield a readable frame
                thumbs.append(cv2.resize(frame, (80, 80)))
        finally:
            cap.release()  # fixed: the capture was never released
        return cv2.hconcat(thumbs)