diff --git a/README.md b/README.md
index f51ccd97712f..d409b3fdeadf 100755
--- a/README.md
+++ b/README.md
@@ -92,9 +92,8 @@ $ python detect.py --source 0  # webcam
                             file.mp4  # video
                             path/  # directory
                             path/*.jpg  # glob
-                            rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa  # rtsp stream
-                            rtmp://192.168.1.105/live/test  # rtmp stream
-                            http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8  # http stream
+                            'https://youtu.be/NUsoVlDFqZg'  # YouTube video
+                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
 ```
 
 To run inference on example images in `data/images`:
diff --git a/detect.py b/detect.py
index 2a4d6f4550c8..c0707da69e6a 100644
--- a/detect.py
+++ b/detect.py
@@ -19,7 +19,7 @@ def detect(save_img=False):
     source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-        ('rtsp://', 'rtmp://', 'http://'))
+        ('rtsp://', 'rtmp://', 'http://', 'https://'))
 
     # Directories
     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
diff --git a/utils/datasets.py b/utils/datasets.py
index 5ef89ab6ea83..ec597b628106 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -20,8 +20,8 @@ from torch.utils.data import Dataset
 from tqdm import tqdm
 
-from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
-    clean_str
+from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
+    resample_segments, clean_str
 from utils.torch_utils import torch_distributed_zero_first
 
 # Parameters
@@ -275,14 +275,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
         for i, s in enumerate(sources):
             # Start the thread to read frames from the video stream
             print(f'{i + 1}/{n}: {s}... ', end='')
-            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
+            url = eval(s) if s.isnumeric() else s
+            if 'youtube.com/' in url or 'youtu.be/' in url:  # if source is YouTube video
+                check_requirements(('pafy', 'youtube_dl'))
+                import pafy
+                url = pafy.new(url).getbest(preftype="mp4").url
+            cap = cv2.VideoCapture(url)
             assert cap.isOpened(), f'Failed to open {s}'
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
             h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-            fps = cap.get(cv2.CAP_PROP_FPS) % 100
+            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
+            _, self.imgs[i] = cap.read()  # guarantee first frame
             thread = Thread(target=self.update, args=([i, cap]), daemon=True)
-            print(f' success ({w}x{h} at {fps:.2f} FPS).')
+            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
             thread.start()
         print('')  # newline
@@ -303,7 +309,7 @@ def update(self, index, cap):
             success, im = cap.retrieve()
             self.imgs[index] = im if success else self.imgs[index] * 0
             n = 0
-            time.sleep(0.01)  # wait time
+            time.sleep(1 / self.fps)  # wait time
 
     def __iter__(self):
         self.count = -1
@@ -444,7 +450,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
                 gb += self.imgs[i].nbytes
                 pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
             pbar.close()
-
+
     def cache_labels(self, path=Path('./labels.cache'), prefix=''):
         # Cache dataset labels, check images and read shapes
         x = {}  # dict
@@ -489,7 +495,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
             pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
                         f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
         pbar.close()
-
+
     if nf == 0:
         print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_
                     b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                     assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
 
+
 def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
     """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
     Usage: from utils.datasets import *; autosplit('../coco128')
diff --git a/utils/general.py b/utils/general.py
index a8aad16a8ab9..5482629ac8c0 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -91,17 +91,20 @@ def check_git_status():
         print(e)
 
 
-def check_requirements(file='requirements.txt', exclude=()):
-    # Check installed dependencies meet requirements
+def check_requirements(requirements='requirements.txt', exclude=()):
+    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
     import pkg_resources as pkg
     prefix = colorstr('red', 'bold', 'requirements:')
-    file = Path(file)
-    if not file.exists():
-        print(f"{prefix} {file.resolve()} not found, check failed.")
-        return
+    if isinstance(requirements, (str, Path)):  # requirements.txt file
+        file = Path(requirements)
+        if not file.exists():
+            print(f"{prefix} {file.resolve()} not found, check failed.")
+            return
+        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
+    else:  # list or tuple of packages
+        requirements = [x for x in requirements if x not in exclude]
     n = 0  # number of packages updates
-    requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
     for r in requirements:
         try:
             pkg.require(r)
@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()):
             print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
 
     if n:  # if packages updated
-        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \
+        source = file.resolve() if 'file' in locals() else requirements
+        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
         print(emojis(s))  # emoji-safe