diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 4083ac354c46..f3e36675f49d 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -39,7 +39,7 @@ jobs:
         pip list
     - name: Run benchmarks
       run: |
-        python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320
+        python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail

   Tests:
     timeout-minutes: 60
diff --git a/export.py b/export.py
index 9daf39f871c2..1d8f07fc9e2f 100644
--- a/export.py
+++ b/export.py
@@ -75,18 +75,18 @@ def export_formats():
     # YOLOv5 export formats
     x = [
-        ['PyTorch', '-', '.pt', True],
-        ['TorchScript', 'torchscript', '.torchscript', True],
-        ['ONNX', 'onnx', '.onnx', True],
-        ['OpenVINO', 'openvino', '_openvino_model', False],
-        ['TensorRT', 'engine', '.engine', True],
-        ['CoreML', 'coreml', '.mlmodel', False],
-        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True],
-        ['TensorFlow GraphDef', 'pb', '.pb', True],
-        ['TensorFlow Lite', 'tflite', '.tflite', False],
-        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False],
-        ['TensorFlow.js', 'tfjs', '_web_model', False],]
-    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU'])
+        ['PyTorch', '-', '.pt', True, True],
+        ['TorchScript', 'torchscript', '.torchscript', True, True],
+        ['ONNX', 'onnx', '.onnx', True, True],
+        ['OpenVINO', 'openvino', '_openvino_model', True, False],
+        ['TensorRT', 'engine', '.engine', False, True],
+        ['CoreML', 'coreml', '.mlmodel', True, False],
+        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
+        ['TensorFlow GraphDef', 'pb', '.pb', True, True],
+        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
+        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
+        ['TensorFlow.js', 'tfjs', '_web_model', False, False],]
+    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])


 def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
diff --git a/utils/benchmarks.py b/utils/benchmarks.py
index 69d653a20916..03bab9b6ded2 100644
--- a/utils/benchmarks.py
+++ b/utils/benchmarks.py
@@ -26,6 +26,7 @@
 """

 import argparse
+import platform
 import sys
 import time
 from pathlib import Path
@@ -54,14 +55,17 @@ def run(
         half=False,  # use FP16 half-precision inference
         test=False,  # test exports only
         pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
 ):
     y, t = [], time.time()
     device = select_device(device)
-    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)
+    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
         try:
-            assert i != 9, 'Edge TPU not supported'
-            assert i != 10, 'TF.js not supported'
-            if device.type != 'cpu':
+            assert i not in (9, 10), f'{name} inference not supported'  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == 'Darwin', f'{name} inference only supported on macOS>=10.13'
+            if 'cpu' in device.type:
+                assert cpu, f'{name} inference not supported on CPU'
+            if 'cuda' in device.type:
                 assert gpu, f'{name} inference not supported on GPU'

             # Export
@@ -77,6 +81,8 @@ def run(
             speeds = result[2]  # times (preprocess, inference, postprocess)
             y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)])  # MB, mAP, t_inference
         except Exception as e:
+            if hard_fail:
+                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
             LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
             y.append([name, None, None, None])  # mAP, t_inference
         if pt_only and i == 0:
@@ -102,6 +108,7 @@ def test(
         half=False,  # use FP16 half-precision inference
         test=False,  # test exports only
         pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
 ):
     y, t = [], time.time()
     device = select_device(device)
@@ -134,6 +141,7 @@ def parse_opt():
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
     parser.add_argument('--test', action='store_true', help='test exports only')
     parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
+    parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure')
    opt = parser.parse_args()
     opt.data = check_yaml(opt.data)  # check YAML
     print_args(vars(opt))
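Note: below is a minimal standalone sketch (not part of the patch) of the `--hard-fail` semantics introduced above; `benchmark_one`, `supported`, and `run_benchmark` are hypothetical names. Intentional skips are raised as `AssertionError` and only logged, while any other exception propagates when `hard_fail=True`, so a CI step like the `--hard-fail` benchmark run above fails on genuine export or inference errors.

```python
import logging

LOGGER = logging.getLogger(__name__)


def benchmark_one(name, supported, run_benchmark, hard_fail=False):
    # Mirrors the try/except pattern in utils/benchmarks.py run():
    # unsupported formats are skipped via AssertionError; real failures
    # either log a warning (default) or propagate (--hard-fail).
    try:
        assert supported, f'{name} inference not supported'  # intentional skip
        return run_benchmark()
    except Exception as e:
        if hard_fail:
            assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
        LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
        return None
```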