diff --git a/export.py b/export.py
index c2289b3bcd03..077370c6764d 100644
--- a/export.py
+++ b/export.py
@@ -67,8 +67,8 @@ from models.experimental import attempt_load
 from models.yolo import Detect
 from utils.dataloaders import LoadImages
-from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, check_yaml,
-                           colorstr, file_size, print_args, url2file)
+from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
+                           check_yaml, colorstr, file_size, get_default_args, print_args, url2file)
 from utils.torch_utils import select_device, smart_inference_mode
@@ -89,103 +89,110 @@ def export_formats():
     return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
 
 
+def try_export(inner_func):
+    # YOLOv5 export decorator, i.e. @try_export
+    inner_args = get_default_args(inner_func)
+
+    def outer_func(*args, **kwargs):
+        prefix = inner_args['prefix']
+        try:
+            with Profile() as dt:
+                f, model = inner_func(*args, **kwargs)
+            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
+            return f, model
+        except Exception as e:
+            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
+            return None, None
+
+    return outer_func
+
+
+@try_export
 def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
     # YOLOv5 TorchScript model export
-    try:
-        LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript')
+    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
+    f = file.with_suffix('.torchscript')
 
-        ts = torch.jit.trace(model, im, strict=False)
-        d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
-        extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
-        if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-            optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
-        else:
-            ts.save(str(f), _extra_files=extra_files)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'{prefix} export failure: {e}')
+    ts = torch.jit.trace(model, im, strict=False)
+    d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
+    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
+    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
+    else:
+        ts.save(str(f), _extra_files=extra_files)
+    return f, None
 
 
+@try_export
 def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
     # YOLOv5 ONNX export
-    try:
-        check_requirements(('onnx',))
-        import onnx
-
-        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-        f = file.with_suffix('.onnx')
-
-        torch.onnx.export(
-            model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
-            im.cpu() if dynamic else im,
-            f,
-            verbose=False,
-            opset_version=opset,
-            training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
-            do_constant_folding=not train,
-            input_names=['images'],
-            output_names=['output'],
-            dynamic_axes={
-                'images': {
-                    0: 'batch',
-                    2: 'height',
-                    3: 'width'},  # shape(1,3,640,640)
-                'output': {
-                    0: 'batch',
-                    1: 'anchors'}  # shape(1,25200,85)
-            } if dynamic else None)
-
-        # Checks
-        model_onnx = onnx.load(f)  # load onnx model
-        onnx.checker.check_model(model_onnx)  # check onnx model
-
-        # Metadata
-        d = {'stride': int(max(model.stride)), 'names': model.names}
-        for k, v in d.items():
-            meta = model_onnx.metadata_props.add()
-            meta.key, meta.value = k, str(v)
-        onnx.save(model_onnx, f)
-
-        # Simplify
-        if simplify:
-            try:
-                cuda = torch.cuda.is_available()
-                check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
-                import onnxsim
-
-                LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
-                model_onnx, check = onnxsim.simplify(model_onnx)
-                assert check, 'assert check failed'
-                onnx.save(model_onnx, f)
-            except Exception as e:
-                LOGGER.info(f'{prefix} simplifier failure: {e}')
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'{prefix} export failure: {e}')
+    check_requirements(('onnx',))
+    import onnx
+
+    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
+    f = file.with_suffix('.onnx')
+
+    torch.onnx.export(
+        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
+        im.cpu() if dynamic else im,
+        f,
+        verbose=False,
+        opset_version=opset,
+        training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
+        do_constant_folding=not train,
+        input_names=['images'],
+        output_names=['output'],
+        dynamic_axes={
+            'images': {
+                0: 'batch',
+                2: 'height',
+                3: 'width'},  # shape(1,3,640,640)
+            'output': {
+                0: 'batch',
+                1: 'anchors'}  # shape(1,25200,85)
+        } if dynamic else None)
+
+    # Checks
+    model_onnx = onnx.load(f)  # load onnx model
+    onnx.checker.check_model(model_onnx)  # check onnx model
+
+    # Metadata
+    d = {'stride': int(max(model.stride)), 'names': model.names}
+    for k, v in d.items():
+        meta = model_onnx.metadata_props.add()
+        meta.key, meta.value = k, str(v)
+    onnx.save(model_onnx, f)
+
+    # Simplify
+    if simplify:
+        try:
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
+            import onnxsim
+
+            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+            model_onnx, check = onnxsim.simplify(model_onnx)
+            assert check, 'assert check failed'
+            onnx.save(model_onnx, f)
+        except Exception as e:
+            LOGGER.info(f'{prefix} simplifier failure: {e}')
+    return f, model_onnx
+
+
+@try_export
 def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    try:
-        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-        import openvino.inference_engine as ie
+    check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+    import openvino.inference_engine as ie
 
-        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-        f = str(file).replace('.pt', f'_openvino_model{os.sep}')
+    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
+    f = str(file).replace('.pt', f'_openvino_model{os.sep}')
 
-        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
-        subprocess.check_output(cmd.split())  # export
-        with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g:
-            yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
+    subprocess.check_output(cmd.split())  # export
+    with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g:
+        yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
+    return f, None
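A self-contained sketch of the call-site contract the decorator introduces: every @try_export-wrapped exporter returns (path, model_or_None) on success and (None, None) on failure, with timing logged either way. Profile and LOGGER are simplified here to time.time() and print(), and export_demo is a hypothetical exporter, not part of this diff:

import inspect
import time


def get_default_args(func):
    # same idea as the new utils.general.get_default_args below
    return {k: v.default for k, v in inspect.signature(func).parameters.items()
            if v.default is not inspect.Parameter.empty}


def try_export(inner_func):
    # simplified mirror of the decorator above: time the call, never raise
    inner_args = get_default_args(inner_func)

    def outer_func(*args, **kwargs):
        prefix = inner_args['prefix']
        start = time.time()
        try:
            f, model = inner_func(*args, **kwargs)
            print(f'{prefix} export success, {time.time() - start:.1f}s, saved as {f}')
            return f, model
        except Exception as e:
            print(f'{prefix} export failure, {time.time() - start:.1f}s: {e}')
            return None, None

    return outer_func


@try_export
def export_demo(path='model.demo', prefix='Demo:'):  # hypothetical exporter
    return path, None


f, _ = export_demo()  # prints 'Demo: export success ...'; f == 'model.demo'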
 
 
 class CoreMLExportModel(torch.nn.Module):
@@ -334,74 +342,70 @@ def export_coreml(model, im, file, num_boxes, num_classes, labels, conf_thres, i
         LOGGER.info(f'\n{prefix} export failure: {e}')
         return None, None
 
-
-def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False):
+@try_export
+def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
     # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
-    prefix = colorstr('TensorRT:')
+    assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
     try:
-        assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
-        try:
-            import tensorrt as trt
-        except Exception:
-            if platform.system() == 'Linux':
-                check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
-            import tensorrt as trt
-
-        if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
-            grid = model.model[-1].anchor_grid
-            model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
-            export_onnx(model, im, file, 12, train, dynamic, simplify)  # opset 12
-            model.model[-1].anchor_grid = grid
-        else:  # TensorRT >= 8
-            check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
-            export_onnx(model, im, file, 13, train, dynamic, simplify)  # opset 13
-        onnx = file.with_suffix('.onnx')
-
-        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-        assert onnx.exists(), f'failed to export ONNX file: {onnx}'
-        f = file.with_suffix('.engine')  # TensorRT engine file
-        logger = trt.Logger(trt.Logger.INFO)
-        if verbose:
-            logger.min_severity = trt.Logger.Severity.VERBOSE
-
-        builder = trt.Builder(logger)
-        config = builder.create_builder_config()
-        config.max_workspace_size = workspace * 1 << 30
-        # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice
-
-        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
-        network = builder.create_network(flag)
-        parser = trt.OnnxParser(network, logger)
-        if not parser.parse_from_file(str(onnx)):
-            raise RuntimeError(f'failed to load ONNX file: {onnx}')
-
-        inputs = [network.get_input(i) for i in range(network.num_inputs)]
-        outputs = [network.get_output(i) for i in range(network.num_outputs)]
-        LOGGER.info(f'{prefix} Network Description:')
+        import tensorrt as trt
+    except Exception:
+        if platform.system() == 'Linux':
+            check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
+        import tensorrt as trt
+
+    if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
+        grid = model.model[-1].anchor_grid
+        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
+        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
+        model.model[-1].anchor_grid = grid
+    else:  # TensorRT >= 8
+        check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
+        export_onnx(model, im, file, 13, False, dynamic, simplify)  # opset 13
+    onnx = file.with_suffix('.onnx')
+
+    LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
+    assert onnx.exists(), f'failed to export ONNX file: {onnx}'
+    f = file.with_suffix('.engine')  # TensorRT engine file
+    logger = trt.Logger(trt.Logger.INFO)
+    if verbose:
+        logger.min_severity = trt.Logger.Severity.VERBOSE
+
+    builder = trt.Builder(logger)
+    config = builder.create_builder_config()
+    config.max_workspace_size = workspace * 1 << 30
+    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice
+
+    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
+    network = builder.create_network(flag)
+    parser = trt.OnnxParser(network, logger)
+    if not parser.parse_from_file(str(onnx)):
+        raise RuntimeError(f'failed to load ONNX file: {onnx}')
+
+    inputs = [network.get_input(i) for i in range(network.num_inputs)]
+    outputs = [network.get_output(i) for i in range(network.num_outputs)]
+    LOGGER.info(f'{prefix} Network Description:')
+    for inp in inputs:
+        LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
+    for out in outputs:
+        LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
+
+    if dynamic:
+        if im.shape[0] <= 1:
+            LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
+        profile = builder.create_optimization_profile()
         for inp in inputs:
-            LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
-        for out in outputs:
-            LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
-
-        if dynamic:
-            if im.shape[0] <= 1:
-                LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
-            profile = builder.create_optimization_profile()
-            for inp in inputs:
-                profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
-            config.add_optimization_profile(profile)
-
-        LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
-        if builder.platform_has_fast_fp16 and half:
-            config.set_flag(trt.BuilderFlag.FP16)
-        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
-            t.write(engine.serialize())
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
+        config.add_optimization_profile(profile)
+
+    LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
+    if builder.platform_has_fast_fp16 and half:
+        config.set_flag(trt.BuilderFlag.FP16)
+    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+        t.write(engine.serialize())
+    return f, None
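For readers unfamiliar with TensorRT optimization profiles, the (min, opt, max) triple built by profile.set_shape above expands as follows; the batch size 8 is an illustrative value, not one taken from this diff:

im_shape = (8, 3, 640, 640)  # hypothetical --dynamic export with --batch-size 8
min_shape = (1, *im_shape[1:])                         # (1, 3, 640, 640)
opt_shape = (max(1, im_shape[0] // 2), *im_shape[1:])  # (4, 3, 640, 640)
max_shape = im_shape                                   # (8, 3, 640, 640)
print(min_shape, opt_shape, max_shape)  # batch range the engine accepts at runtime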
 
 
+@try_export
 def export_saved_model(model,
                        im,
                        file,
@@ -415,162 +419,142 @@ def export_saved_model(model,
                        keras=False,
                        prefix=colorstr('TensorFlow SavedModel:')):
     # YOLOv5 TensorFlow SavedModel export
-    try:
-        import tensorflow as tf
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-        from models.tf import TFDetect, TFModel
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        f = str(file).replace('.pt', '_saved_model')
-        batch_size, ch, *imgsz = list(im.shape)  # BCHW
-
-        tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-        im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
-        _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-        inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
-        outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-        keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
-        keras_model.trainable = False
-        keras_model.summary()
-        if keras:
-            keras_model.save(f, save_format='tf')
-        else:
-            spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
-            m = tf.function(lambda x: keras_model(x))  # full model
-            m = m.get_concrete_function(spec)
-            frozen_func = convert_variables_to_constants_v2(m)
-            tfm = tf.Module()
-            tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
-            tfm.__call__(im)
-            tf.saved_model.save(tfm,
-                                f,
-                                options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
-                                if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return keras_model, f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-        return None, None
+    import tensorflow as tf
+    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+
+    from models.tf import TFModel
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    f = str(file).replace('.pt', '_saved_model')
+    batch_size, ch, *imgsz = list(im.shape)  # BCHW
+
+    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
+    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
+    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
+    keras_model.trainable = False
+    keras_model.summary()
+    if keras:
+        keras_model.save(f, save_format='tf')
+    else:
+        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
+        m = tf.function(lambda x: keras_model(x))  # full model
+        m = m.get_concrete_function(spec)
+        frozen_func = convert_variables_to_constants_v2(m)
+        tfm = tf.Module()
+        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
+        tfm.__call__(im)
+        tf.saved_model.save(tfm,
+                            f,
+                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
+                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
+    return f, keras_model
 
 
+@try_export
 def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
     # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
-    try:
-        import tensorflow as tf
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+    import tensorflow as tf
+    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
 
-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        f = file.with_suffix('.pb')
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    f = file.with_suffix('.pb')
 
-        m = tf.function(lambda x: keras_model(x))  # full model
-        m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
-        frozen_func = convert_variables_to_constants_v2(m)
-        frozen_func.graph.as_graph_def()
-        tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    m = tf.function(lambda x: keras_model(x))  # full model
+    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+    frozen_func = convert_variables_to_constants_v2(m)
+    frozen_func.graph.as_graph_def()
+    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+    return f, None
 
 
+@try_export
 def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
     # YOLOv5 TensorFlow Lite export
-    try:
-        import tensorflow as tf
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        batch_size, ch, *imgsz = list(im.shape)  # BCHW
-        f = str(file).replace('.pt', '-fp16.tflite')
-
-        converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
-        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
-        converter.target_spec.supported_types = [tf.float16]
-        converter.optimizations = [tf.lite.Optimize.DEFAULT]
-        if int8:
-            from models.tf import representative_dataset_gen
-            dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
-            converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
-            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
-            converter.target_spec.supported_types = []
-            converter.inference_input_type = tf.uint8  # or tf.int8
-            converter.inference_output_type = tf.uint8  # or tf.int8
-            converter.experimental_new_quantizer = True
-            f = str(file).replace('.pt', '-int8.tflite')
-        if nms or agnostic_nms:
-            converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
-
-        tflite_model = converter.convert()
-        open(f, "wb").write(tflite_model)
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+    import tensorflow as tf
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    batch_size, ch, *imgsz = list(im.shape)  # BCHW
+    f = str(file).replace('.pt', '-fp16.tflite')
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
+    converter.target_spec.supported_types = [tf.float16]
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    if int8:
+        from models.tf import representative_dataset_gen
+        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
+        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
+        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+        converter.target_spec.supported_types = []
+        converter.inference_input_type = tf.uint8  # or tf.int8
+        converter.inference_output_type = tf.uint8  # or tf.int8
+        converter.experimental_new_quantizer = True
+        f = str(file).replace('.pt', '-int8.tflite')
+    if nms or agnostic_nms:
+        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
+
+    tflite_model = converter.convert()
+    open(f, "wb").write(tflite_model)
+    return f, None
+
+
+@try_export
 def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
     # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
-    try:
-        cmd = 'edgetpu_compiler --version'
-        help_url = 'https://coral.ai/docs/edgetpu/compiler/'
-        assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-        if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
-            LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
-            sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
-            for c in (
-                    'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
-                    'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
-                    'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
-                subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
-        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
-
-        LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
-        f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
-        f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model
-
-        cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
-        subprocess.run(cmd.split(), check=True)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+    cmd = 'edgetpu_compiler --version'
+    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
+    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
+    if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
+        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
+        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
+        for c in (
+                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
+                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
+                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
+            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
+    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
+
+    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
+    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
+    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model
+
+    cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
+    subprocess.run(cmd.split(), check=True)
+    return f, None
+
+
+@try_export
 def export_tfjs(file, prefix=colorstr('TensorFlow.js:')):
     # YOLOv5 TensorFlow.js export
-    try:
-        check_requirements(('tensorflowjs',))
-        import re
-
-        import tensorflowjs as tfjs
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
-        f = str(file).replace('.pt', '_web_model')  # js dir
-        f_pb = file.with_suffix('.pb')  # *.pb path
-        f_json = f'{f}/model.json'  # *.json path
-
-        cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
-              f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
-        subprocess.run(cmd.split())
-
-        json = Path(f_json).read_text()
-        with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
-            subst = re.sub(
-                r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
-                r'"Identity_1": {"name": "Identity_1"}, '
-                r'"Identity_2": {"name": "Identity_2"}, '
-                r'"Identity_3": {"name": "Identity_3"}}}', json)
-            j.write(subst)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    check_requirements(('tensorflowjs',))
+    import re
+
+    import tensorflowjs as tfjs
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
+    f = str(file).replace('.pt', '_web_model')  # js dir
+    f_pb = file.with_suffix('.pb')  # *.pb path
+    f_json = f'{f}/model.json'  # *.json path
+
+    cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
+          f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
+    subprocess.run(cmd.split())
+
+    json = Path(f_json).read_text()
+    with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
+        subst = re.sub(
+            r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
+            r'"Identity_1": {"name": "Identity_1"}, '
+            r'"Identity_2": {"name": "Identity_2"}, '
+            r'"Identity_3": {"name": "Identity_3"}}}', json)
+        j.write(subst)
+    return f, None
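A one-liner showing the BCHW-to-BHWC reshuffle export_saved_model performs above when it builds the tf.zeros input; the shape values are illustrative:

batch_size, ch, *imgsz = [1, 3, 640, 640]  # PyTorch BCHW
print((batch_size, *imgsz, ch))            # (1, 640, 640, 3), TensorFlow BHWC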
 
 
 @smart_inference_mode()
@@ -643,13 +627,13 @@ def run(
     f = [''] * 10  # exported filenames
     warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
     if jit:
-        f[0] = export_torchscript(model, im, file, optimize)
+        f[0], _ = export_torchscript(model, im, file, optimize)
     if engine:  # TensorRT required before ONNX
-        f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose)
+        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
     if onnx or xml:  # OpenVINO requires ONNX
-        f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
+        f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3] = export_openvino(model, file, half)
+        f[3], _ = export_openvino(model, file, half)
     if coreml:
         nb = shape[1]
         _, f[4] = export_coreml(model, im, file, nb, nc, names, conf_thres, iou_thres, int8, half)
@@ -659,7 +643,7 @@ def run(
         if int8 or edgetpu:  # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
             check_requirements(('flatbuffers==1.12',))  # required before `import tensorflow`
             assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
-        model, f[5] = export_saved_model(model.cpu(),
+        f[5], model = export_saved_model(model.cpu(),
                                          im,
                                          file,
                                          dynamic,
@@ -671,19 +655,19 @@ def run(
                                          conf_thres=conf_thres,
                                          keras=keras)
         if pb or tfjs:  # pb prerequisite to tfjs
-            f[6] = export_pb(model, file)
+            f[6], _ = export_pb(model, file)
         if tflite or edgetpu:
-            f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
+            f[7], _ = export_tflite(model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
         if edgetpu:
-            f[8] = export_edgetpu(file)
+            f[8], _ = export_edgetpu(file)
         if tfjs:
-            f[9] = export_tfjs(file)
+            f[9], _ = export_tfjs(file)
 
     # Finish
     f = [str(x) for x in f if x]  # filter out '' and None
     if any(f):
         h = '--half' if half else ''  # --half FP16 inference arg
-        LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
+        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
                     f"\nDetect:          python detect.py --weights {f[-1]} {h}"
                     f"\nValidate:        python val.py --weights {f[-1]} {h}"
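Because failed exporters now return (None, None) instead of leaving their slot unset, the unchanged filter at the end of run() silently drops them. A minimal illustration with hypothetical values:

f = ['yolov5s.torchscript', None, '']  # success, failed export, format not requested
f = [str(x) for x in f if x]           # -> ['yolov5s.torchscript']
print(f)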
diff --git a/utils/general.py b/utils/general.py
index 3bc6fbc22d57..d8c90f10ac8f 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -148,6 +148,7 @@ def __init__(self, t=0.0):
 
     def __enter__(self):
         self.start = self.time()
+        return self
 
     def __exit__(self, type, value, traceback):
         self.dt = self.time() - self.start  # delta-time
@@ -220,10 +221,10 @@ def methods(instance):
     return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
 
 
-def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
+def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
     # Print function arguments (optional args dict)
     x = inspect.currentframe().f_back  # previous frame
-    file, _, fcn, _, _ = inspect.getframeinfo(x)
+    file, _, func, _, _ = inspect.getframeinfo(x)
     if args is None:  # get args automatically
         args, _, _, frm = inspect.getargvalues(x)
         args = {k: v for k, v in frm.items() if k in args}
@@ -231,7 +232,7 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
         file = Path(file).resolve().relative_to(ROOT).with_suffix('')
     except ValueError:
         file = Path(file).stem
-    s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '')
+    s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
     LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))
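The `return self` added to __enter__ is what lets the decorator write `with Profile() as dt` and then read dt.t; without it, `as dt` binds None. A minimal stand-alone re-implementation, assuming (as the decorator's use of dt.t suggests) that __exit__ also accumulates dt into t:

import time


class Profile:
    # minimal sketch of utils.general.Profile, not the full class
    def __init__(self, t=0.0):
        self.t = t

    def __enter__(self):
        self.start = time.time()
        return self  # without this, 'with Profile() as dt' binds dt to None

    def __exit__(self, type, value, traceback):
        self.dt = time.time() - self.start  # delta-time
        self.t += self.dt  # accumulate (assumption, matching the dt.t usage above)


with Profile() as dt:
    sum(range(10 ** 6))
print(f'{dt.t:.3f}s')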
@@ -255,7 +256,13 @@ def init_seeds(seed=0, deterministic=False):
 
 def intersect_dicts(da, db, exclude=()):
     # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
-    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
+    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
+
+
+def get_default_args(func):
+    # Get func() default arguments
+    signature = inspect.signature(func)
+    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
 
 
 def get_latest_run(search_dir='.'):
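Two quick REPL-style checks of the utils touched here: get_default_args is how try_export recovers an exporter's `prefix` default, and the intersect_dicts rewrite is a pure De Morgan equivalence. The demo function is hypothetical:

import inspect


def get_default_args(func):
    signature = inspect.signature(func)
    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}


def export_demo(model, file, half=False, prefix='Demo:'):  # hypothetical exporter
    pass


print(get_default_args(export_demo))  # {'half': False, 'prefix': 'Demo:'}

# intersect_dicts: 'not any(x in k ...)' equals 'all(x not in k ...)' by De Morgan
k, exclude = 'model.24.anchor_grid', ('anchor',)
assert (not any(x in k for x in exclude)) == all(x not in k for x in exclude)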