diff --git a/utils/benchmarks.py b/utils/benchmarks.py
index d0f2a2529c5d..69d653a20916 100644
--- a/utils/benchmarks.py
+++ b/utils/benchmarks.py
@@ -41,7 +41,7 @@
 import export
 import val
 from utils import notebook_init
-from utils.general import LOGGER, check_yaml, print_args
+from utils.general import LOGGER, check_yaml, file_size, print_args
 from utils.torch_utils import select_device
 
 
@@ -75,10 +75,10 @@ def run(
             result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)
             metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
             speeds = result[2]  # times (preprocess, inference, postprocess)
-            y.append([name, round(metrics[3], 4), round(speeds[1], 2)])  # mAP, t_inference
+            y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)])  # MB, mAP, t_inference
         except Exception as e:
             LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
-            y.append([name, None, None])  # mAP, t_inference
+            y.append([name, None, None, None])  # mAP, t_inference
         if pt_only and i == 0:
             break  # break after PyTorch
 
@@ -86,7 +86,8 @@
     LOGGER.info('\n')
     parse_opt()
     notebook_init()  # print system info
-    py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', ''])
+    c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
+    py = pd.DataFrame(y, columns=c)
     LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
     LOGGER.info(str(py if map else py.iloc[:, :2]))
     return py