From 3998530e0f1d219e71ada865ffa11978944182d0 Mon Sep 17 00:00:00 2001 From: ben-milanko Date: Sat, 10 Apr 2021 17:14:18 +1000 Subject: [PATCH 01/14] YouTube livestream detection --- detect.py | 2 +- utils/datasets.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index 2a4d6f4550c8..c0707da69e6a 100644 --- a/detect.py +++ b/detect.py @@ -19,7 +19,7 @@ def detect(save_img=False): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://')) + ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run diff --git a/utils/datasets.py b/utils/datasets.py index 5ef89ab6ea83..c2b2b0f5d7c4 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -13,6 +13,7 @@ from threading import Thread import cv2 +import pafy import numpy as np import torch import torch.nn.functional as F @@ -275,7 +276,12 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): for i, s in enumerate(sources): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: {s}... ', end='') - cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) + url = eval(s) if s.isnumeric() else s + if 'youtube' in url: + video = pafy.new(url) + best = video.getbest(preftype="mp4") + url = best.url + cap = cv2.VideoCapture(url) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) From a9a4678904260740dc26f76b342d51f735e292e3 Mon Sep 17 00:00:00 2001 From: ben-milanko Date: Sun, 11 Apr 2021 11:13:25 +1000 Subject: [PATCH 02/14] dependency update to auto install pafy --- utils/datasets.py | 7 ++++--- utils/general.py | 36 ++++++++++++++++++++++++++++-------- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c2b2b0f5d7c4..99199d0875c1 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -13,7 +13,6 @@ from threading import Thread import cv2 -import pafy import numpy as np import torch import torch.nn.functional as F @@ -21,7 +20,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ clean_str +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ clean_str from utils.torch_utils import torch_distributed_zero_first @@ -277,7 +276,9 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: {s}... 
', end='') url = eval(s) if s.isnumeric() else s - if 'youtube' in url: + if 'youtube' or 'youtu.be' in url: + check_requirements(file=None, include=('pafy')) + import pafy video = pafy.new(url) best = video.getbest(preftype="mp4") url = best.url diff --git a/utils/general.py b/utils/general.py index a8aad16a8ab9..32ef13309437 100755 --- a/utils/general.py +++ b/utils/general.py @@ -91,18 +91,34 @@ def check_git_status(): print(e) -def check_requirements(file='requirements.txt', exclude=()): +def check_requirements(file='requirements.txt', exclude=(), include=()): # Check installed dependencies meet requirements import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') - file = Path(file) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return + + if file is None: + if len(include) == 0: + print('No file or packages included, check failed.') + return + else: + iter_packages = pkg.parse_requirements(include) + else: + file = Path(file) + if not file.exists() and len(include) == 0: + print(f"{prefix} {file.resolve()} not found, check failed.") + return + elif file.exists() and len(include) == 0: + iter_packages = pkg.parse_requirements(file.open()) + elif not file.exists and len(include) > 0: + iter_packages = pkg.parse_requirements(include) + else: + iter_packages = zip(pkg.parse_requirements(include), pkg.parse_requirements(file.open())) n = 0 # number of packages updates - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + requirements = [f'{x.name}{x.specifier}' for x in iter_packages if x.name not in exclude] for r in requirements: + print(r) + # input() try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met @@ -111,8 +127,12 @@ def check_requirements(file='requirements.txt', exclude=()): print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) if n: # if packages updated - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + if file is not None: + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" + else: + s = f"{prefix} {n} package{'s' * (n > 1)} updated per include\n" + + s += f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" print(emojis(s)) # emoji-safe From 555bfc362b2e229204f573179f85609c0ed56dfd Mon Sep 17 00:00:00 2001 From: ben-milanko Date: Sun, 11 Apr 2021 11:33:47 +1000 Subject: [PATCH 03/14] Remove print --- utils/general.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 32ef13309437..e3c03bd5c8ae 100755 --- a/utils/general.py +++ b/utils/general.py @@ -117,8 +117,6 @@ def check_requirements(file='requirements.txt', exclude=(), include=()): n = 0 # number of packages updates requirements = [f'{x.name}{x.specifier}' for x in iter_packages if x.name not in exclude] for r in requirements: - print(r) - # input() try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met From dde9d4f7e2b360e345c68f693e7c380a55a8b72e Mon Sep 17 00:00:00 2001 From: ben-milanko Date: Sun, 11 Apr 2021 22:47:53 +1000 Subject: [PATCH 04/14] include youtube_dl in deps --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 
99199d0875c1..e92d8bb1eb71 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -277,7 +277,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): print(f'{i + 1}/{n}: {s}... ', end='') url = eval(s) if s.isnumeric() else s if 'youtube' or 'youtu.be' in url: - check_requirements(file=None, include=('pafy')) + check_requirements(file=None, include=('pafy', 'youtube_dl')) import pafy video = pafy.new(url) best = video.getbest(preftype="mp4") From 20104e0f1667d26fee536dbb4f0cd4bdbffa9390 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 14:56:23 +0200 Subject: [PATCH 05/14] PEP8 reformat --- utils/datasets.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 99199d0875c1..ce9892e550ea 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -20,8 +20,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ - clean_str +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -451,7 +451,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' pbar.close() - + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict @@ -496,7 +496,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() - + if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') @@ -1041,6 +1041,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.datasets import *; autosplit('../coco128') From 626a77efc4b846f156b4a9db9c26d349383df57a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 14:57:08 +0200 Subject: [PATCH 06/14] youtube url check fix --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8bcb944f1076..70f4df3edd23 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -276,7 +276,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: {s}... 
', end='') url = eval(s) if s.isnumeric() else s - if 'youtube' or 'youtu.be' in url: + if 'youtube.com/' in url or 'youtu.be/' in url: check_requirements(file=None, include=('pafy', 'youtube_dl')) import pafy video = pafy.new(url) From 92986c9328aa8ce078181639b2fd2c275f3d1f31 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 15:03:10 +0200 Subject: [PATCH 07/14] reduce lines --- utils/datasets.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 70f4df3edd23..8d5fbb6a4a16 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -279,9 +279,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): if 'youtube.com/' in url or 'youtu.be/' in url: check_requirements(file=None, include=('pafy', 'youtube_dl')) import pafy - video = pafy.new(url) - best = video.getbest(preftype="mp4") - url = best.url + url = pafy.new(url).getbest(preftype="mp4").url cap = cv2.VideoCapture(url) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) From 359001fb8b1dbc9fa052bcae224a1a64492f4430 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 15:04:12 +0200 Subject: [PATCH 08/14] add comment --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8d5fbb6a4a16..86a63786e17e 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -276,7 +276,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: {s}... ', end='') url = eval(s) if s.isnumeric() else s - if 'youtube.com/' in url or 'youtu.be/' in url: + if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video check_requirements(file=None, include=('pafy', 'youtube_dl')) import pafy url = pafy.new(url).getbest(preftype="mp4").url From 1a74600913ae26d639bb706d7fc9e829a3dbd330 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 16:02:12 +0200 Subject: [PATCH 09/14] update check_requirements --- utils/datasets.py | 2 +- utils/general.py | 35 +++++++++++------------------------ 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 86a63786e17e..09f1ef199199 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -277,7 +277,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): print(f'{i + 1}/{n}: {s}... 
', end='') url = eval(s) if s.isnumeric() else s if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video - check_requirements(file=None, include=('pafy', 'youtube_dl')) + check_requirements(('pafy', 'youtube_dl')) import pafy url = pafy.new(url).getbest(preftype="mp4").url cap = cv2.VideoCapture(url) diff --git a/utils/general.py b/utils/general.py index e3c03bd5c8ae..c659e840fcbc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -91,31 +91,21 @@ def check_git_status(): print(e) -def check_requirements(file='requirements.txt', exclude=(), include=()): - # Check installed dependencies meet requirements +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') - if file is None: - if len(include) == 0: - print('No file or packages included, check failed.') - return - else: - iter_packages = pkg.parse_requirements(include) - else: - file = Path(file) - if not file.exists() and len(include) == 0: + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): print(f"{prefix} {file.resolve()} not found, check failed.") return - elif file.exists() and len(include) == 0: - iter_packages = pkg.parse_requirements(file.open()) - elif not file.exists and len(include) > 0: - iter_packages = pkg.parse_requirements(include) - else: - iter_packages = zip(pkg.parse_requirements(include), pkg.parse_requirements(file.open())) + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] n = 0 # number of packages updates - requirements = [f'{x.name}{x.specifier}' for x in iter_packages if x.name not in exclude] for r in requirements: try: pkg.require(r) @@ -125,12 +115,9 @@ def check_requirements(file='requirements.txt', exclude=(), include=()): print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) if n: # if packages updated - if file is not None: - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" - else: - s = f"{prefix} {n} package{'s' * (n > 1)} updated per include\n" - - s += f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" print(emojis(s)) # emoji-safe From 3d57d56f4dbd3807b3889741871851866944ae4a Mon Sep 17 00:00:00 2001 From: ben-milanko Date: Mon, 12 Apr 2021 00:04:49 +1000 Subject: [PATCH 10/14] stream framerate fix --- utils/datasets.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 09f1ef199199..f8d3a6cad4ed 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -284,10 +284,11 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) - 
print(f' success ({w}x{h} at {fps:.2f} FPS).') + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') thread.start() print('') # newline @@ -306,9 +307,12 @@ def update(self, index, cap): cap.grab() if n == 4: # read every 4th frame success, im = cap.retrieve() + if not success: + cap.release() + raise StopIteration # This does not correctly throw in a thread self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 - time.sleep(0.01) # wait time + time.sleep(1/self.fps) # wait time def __iter__(self): self.count = -1 From f4139154853f9d38e2fc4fbd334969d857405a21 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 16:09:55 +0200 Subject: [PATCH 11/14] Update README.md --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f51ccd97712f..d409b3fdeadf 100755 --- a/README.md +++ b/README.md @@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam file.mp4 # video path/ # directory path/*.jpg # glob - rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream - rtmp://192.168.1.105/live/test # rtmp stream - http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream + 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` To run inference on example images in `data/images`: From b238e58bd303c9775053ae530479b6b23e5a6904 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 16:12:35 +0200 Subject: [PATCH 12/14] cleanup --- utils/general.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index c659e840fcbc..5482629ac8c0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -95,7 +95,6 @@ def check_requirements(requirements='requirements.txt', exclude=()): # Check installed dependencies meet requirements (pass *.txt file or list of packages) import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') - if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) if not file.exists(): From e694ca0acaade2a39a06788523cf8e4a381efbdd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 16:39:33 +0200 Subject: [PATCH 13/14] PEP8 --- utils/datasets.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index f8d3a6cad4ed..d1720afbcd98 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -285,7 +285,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 - + _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) print(f' success ({w}x{h} at {self.fps:.2f} FPS).') @@ -309,10 +309,10 @@ def update(self, index, cap): success, im = cap.retrieve() if not success: cap.release() - raise StopIteration # This does not correctly throw in a thread + raise StopIteration # This does not correctly throw in a thread self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 - time.sleep(1/self.fps) # wait time + time.sleep(1 / self.fps) # wait time def __iter__(self): self.count = -1 From efa93b1ed2cd556eafbc38c38bed718bf3868a32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 18:50:44 +0200 Subject: [PATCH 14/14] remove cap.retrieve() failure code --- utils/datasets.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/datasets.py 
b/utils/datasets.py index d1720afbcd98..ec597b628106 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -307,9 +307,6 @@ def update(self, index, cap): cap.grab() if n == 4: # read every 4th frame success, im = cap.retrieve() - if not success: - cap.release() - raise StopIteration # This does not correctly throw in a thread self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 time.sleep(1 / self.fps) # wait time
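Reviewer note: end to end, the feature this series adds is exercised with `python detect.py --source 'https://youtu.be/NUsoVlDFqZg'` (the README example added in PATCH 11). The sketch below condenses the stream handling the series converges on after PATCH 14: a YouTube page URL is resolved to a direct MP4 stream via pafy/youtube_dl before being handed to cv2.VideoCapture, and a daemon thread grabs every frame but only decodes every 4th, sleeping 1/fps between reads (PATCH 10 introduces this in place of the previous fixed 0.01 s wait so streams are consumed at their native frame rate). It is a minimal standalone approximation, not the LoadStreams class itself; the FPS fallback is an assumption added here to avoid a zero division, and the URL is the README placeholder.

```python
# Minimal sketch of the behaviour these patches converge on (not the LoadStreams class itself).
# Assumes `pip install pafy youtube_dl opencv-python`.
import time
from threading import Thread

import cv2
import pafy


def open_stream(source):
    # Resolve a YouTube page URL to a direct MP4 stream URL, as in LoadStreams.__init__
    if 'youtube.com/' in source or 'youtu.be/' in source:
        source = pafy.new(source).getbest(preftype="mp4").url
    cap = cv2.VideoCapture(source)
    assert cap.isOpened(), f'Failed to open {source}'
    return cap


def reader(cap, frames, fps):
    # Background reader, as in LoadStreams.update(): grab every frame, decode every 4th
    n = 0
    while cap.isOpened():
        n += 1
        cap.grab()
        if n == 4:
            success, im = cap.retrieve()
            if success:
                frames[0] = im
            n = 0
        time.sleep(1 / fps)  # pace reads to the reported stream frame rate


if __name__ == '__main__':
    cap = open_stream('https://youtu.be/NUsoVlDFqZg')  # placeholder URL from the README example
    fps = cap.get(cv2.CAP_PROP_FPS) % 100 or 30  # 30 FPS fallback is an assumption, not in the patches
    frames = [None]
    Thread(target=reader, args=(cap, frames, fps), daemon=True).start()
    time.sleep(3)  # let a few frames arrive
    if frames[0] is not None:
        print(f'latest frame: {frames[0].shape}')
```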
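The other interface change worth calling out is check_requirements() in utils/general.py: after PATCH 09 it accepts either a requirements-file path or a plain list/tuple of package names and pip-installs whatever is missing, which is what lets LoadStreams pull in pafy/youtube_dl on demand. A short usage sketch (the exclude entry is illustrative, not taken from this diff):

```python
from utils.general import check_requirements

# File form (the default): verify everything pinned in requirements.txt
check_requirements('requirements.txt', exclude=('pycocotools',))  # illustrative exclude

# List/tuple form: verify, and auto-install if missing, an ad-hoc set of packages,
# as LoadStreams now does before importing pafy for YouTube sources
check_requirements(('pafy', 'youtube_dl'))
```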