Skip to content

Commit

Permalink
YouTube Livestream Detection (ultralytics#2752)
Browse files Browse the repository at this point in the history
* YouTube livestream detection

* dependency update to auto install pafy

* Remove print

* include youtube_dl in deps

* PEP8 reformat

* youtube url check fix

* reduce lines

* add comment

* update check_requirements

* stream framerate fix

* Update README.md

* cleanup

* PEP8

* remove cap.retrieve() failure code

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
  • Loading branch information
Ben Milanko and glenn-jocher committed Apr 11, 2021
1 parent 6253e3d commit fc0385c
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 20 deletions.
5 changes: 2 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam
file.mp4 # video
path/ # directory
path/*.jpg # glob
rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream
rtmp://192.168.1.105/live/test # rtmp stream
http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream
'https://youtu.be/NUsoVlDFqZg' # YouTube video
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
```

To run inference on example images in `data/images`:
Expand Down
2 changes: 1 addition & 1 deletion detect.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
('rtsp://', 'rtmp://', 'http://', 'https://'))

# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
Expand Down
23 changes: 15 additions & 8 deletions utils/datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first

# Parameters
Expand Down Expand Up @@ -275,14 +275,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
url = eval(s) if s.isnumeric() else s
if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
url = pafy.new(url).getbest(preftype="mp4").url
cap = cv2.VideoCapture(url)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100

_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start()
print('') # newline

Expand All @@ -303,7 +309,7 @@ def update(self, index, cap):
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(0.01) # wait time
time.sleep(1 / self.fps) # wait time

def __iter__(self):
self.count = -1
Expand Down Expand Up @@ -444,7 +450,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
pbar.close()

def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
Expand Down Expand Up @@ -489,7 +495,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()

if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

Expand Down Expand Up @@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco128')
Expand Down
20 changes: 12 additions & 8 deletions utils/general.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,17 +91,20 @@ def check_git_status():
print(e)


def check_requirements(file='requirements.txt', exclude=()):
# Check installed dependencies meet requirements
def check_requirements(requirements='requirements.txt', exclude=()):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
import pkg_resources as pkg
prefix = colorstr('red', 'bold', 'requirements:')
file = Path(file)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]

n = 0 # number of packages updates
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
for r in requirements:
try:
pkg.require(r)
Expand All @@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()):
print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())

if n: # if packages updated
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s)) # emoji-safe

Expand Down

0 comments on commit fc0385c

Please sign in to comment.