Enable generic exporting of a trained model to ONNX or OpenVINO IR #509

Merged: 15 commits, Aug 24, 2022

Changes from 9 commits
22 changes: 13 additions & 9 deletions anomalib/deploy/optimize.py
@@ -46,6 +46,7 @@ def export_convert(
     input_size: Union[List[int], Tuple[int, int]],
     onnx_path: Union[str, Path],
     export_path: Union[str, Path],
+    export_mode: str,
 ):
     """Export the model to onnx format and convert to OpenVINO IR.
 
@@ -54,6 +55,7 @@ def export_convert(
         input_size (Union[List[int], Tuple[int, int]]): Image size used as the input for onnx converter.
         onnx_path (Union[str, Path]): Path to output onnx model.
         export_path (Union[str, Path]): Path to exported OpenVINO IR.
+        export_mode (str): Export mode, either ``onnx`` or ``openvino``.
     """
     height, width = input_size
     torch.onnx.export(
@@ -64,12 +66,14 @@ def export_convert(
         input_names=["input"],
         output_names=["output"],
     )
-    optimize_command = "mo --input_model " + str(onnx_path) + " --output_dir " + str(export_path)
-    os.system(optimize_command)
-    with open(Path(export_path) / "meta_data.json", "w", encoding="utf-8") as metadata_file:
-        meta_data = get_model_metadata(model)
-        # Convert metadata from torch
-        for key, value in meta_data.items():
-            if isinstance(value, Tensor):
-                meta_data[key] = value.numpy().tolist()
-        json.dump(meta_data, metadata_file, ensure_ascii=False, indent=4)
+    if export_mode == "openvino":
+        export_path = os.path.join(str(export_path), "openvino")
+        optimize_command = "mo --input_model " + str(onnx_path) + " --output_dir " + str(export_path)
+        os.system(optimize_command)
+    with open(Path(export_path) / "meta_data.json", "w", encoding="utf-8") as metadata_file:
+        meta_data = get_model_metadata(model)
+        # Convert metadata from torch
+        for key, value in meta_data.items():
+            if isinstance(value, Tensor):
+                meta_data[key] = value.numpy().tolist()
+        json.dump(meta_data, metadata_file, ensure_ascii=False, indent=4)
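For reference, a minimal sketch of calling the updated `export_convert` after training. The model variable, output directory, and input size below are illustrative, not part of this diff:

```python
# Sketch: invoking the updated export_convert (paths and model are assumptions).
from pathlib import Path

from anomalib.deploy.optimize import export_convert

export_root = Path("results/export")  # hypothetical output directory
export_root.mkdir(parents=True, exist_ok=True)

export_convert(
    model=trained_model,               # a trained AnomalyModule, assumed to exist
    input_size=(256, 256),
    onnx_path=export_root / "model.onnx",
    export_path=export_root,
    export_mode="openvino",            # "onnx" skips the OpenVINO conversion step
)
# With export_mode="openvino", the IR files and meta_data.json are written to
# export_root / "openvino"; with "onnx", meta_data.json sits next to model.onnx.
```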
3 changes: 3 additions & 0 deletions anomalib/models/cflow/config.yaml
@@ -62,6 +62,9 @@ logging:
   logger: [] # options: [tensorboard, wandb, csv] or combinations.
   log_graph: false # Logs the model graph to respective logger.
 
+optimization:
+  export_mode: ""
+
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
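Every model config below gains the same `optimization.export_mode` key. A quick sketch of reading it with OmegaConf (the path is one of the configs touched in this PR):

```python
# Sketch: reading the new optimization.export_mode key from a model config.
from omegaconf import OmegaConf

config = OmegaConf.load("anomalib/models/cflow/config.yaml")
export_mode = config.optimization.export_mode  # "" (no export), "onnx", or "openvino"
print(f"export_mode={export_mode!r}")
```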
2 changes: 2 additions & 0 deletions anomalib/models/dfkde/config.yaml
@@ -50,6 +50,8 @@ logging:
   logger: [] # options: [tensorboard, wandb, csv] or combinations.
   log_graph: false # Logs the model graph to respective logger.
 
+optimization:
+  export_mode: ""
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
2 changes: 2 additions & 0 deletions anomalib/models/dfm/config.yaml
@@ -50,6 +50,8 @@ logging:
   logger: [] # options: [tensorboard, wandb, csv] or combinations.
   log_graph: false # Logs the model graph to respective logger.
 
+optimization:
+  export_mode: ""
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
4 changes: 1 addition & 3 deletions anomalib/models/draem/config.yaml
@@ -60,9 +60,7 @@ logging:
   log_graph: false # Logs the model graph to respective logger.
 
 optimization:
-  openvino:
-    apply: false
-
+  export_mode: ""
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
3 changes: 3 additions & 0 deletions anomalib/models/fastflow/config.yaml
@@ -62,6 +62,9 @@ logging:
   logger: [] # options: [tensorboard, wandb, csv] or combinations.
   log_graph: false # Logs the model graph to respective logger.
 
+optimization:
+  export_mode: ""
+
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
3 changes: 1 addition & 2 deletions anomalib/models/ganomaly/config.yaml
@@ -63,8 +63,7 @@ logging:
   log_graph: false # Logs the model graph to respective logger.
 
 optimization:
-  openvino:
-    apply: false
+  export_mode: ""
 
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
3 changes: 1 addition & 2 deletions anomalib/models/padim/config.yaml
@@ -58,8 +58,7 @@ logging:
   log_graph: false # Logs the model graph to respective logger.
 
 optimization:
-  openvino:
-    apply: false
+  export_mode: ""
 
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
3 changes: 3 additions & 0 deletions anomalib/models/patchcore/config.yaml
@@ -58,6 +58,9 @@ logging:
   logger: [] # options: [tensorboard, wandb, csv] or combinations.
   log_graph: false # Logs the model graph to respective logger.
 
+optimization:
+  export_mode: ""
+
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
3 changes: 1 addition & 2 deletions anomalib/models/reverse_distillation/config.yaml
@@ -67,8 +67,7 @@ logging:
   log_graph: false # Logs the model graph to respective logger.
 
 optimization:
-  openvino:
-    apply: false
+  export_mode: ""
 
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
4 changes: 1 addition & 3 deletions anomalib/models/stfpm/config.yaml
@@ -65,9 +65,7 @@ logging:
   log_graph: false # Logs the model graph to respective logger.
 
 optimization:
-  openvino:
-    apply: false
-
+  export_mode: ""
 # PL Trainer Args. Don't add extra parameter here.
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
28 changes: 23 additions & 5 deletions anomalib/utils/callbacks/__init__.py
@@ -114,18 +114,36 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]:
                     export_dir=os.path.join(config.project.path, "compressed"),
                 )
             )
-        if "openvino" in config.optimization and config.optimization.openvino.apply:
-            from .openvino import (  # pylint: disable=import-outside-toplevel
-                OpenVINOCallback,
+        if "openvino" in config.optimization.export_mode:
+            from .export import (  # pylint: disable=import-outside-toplevel
+                ExportCallback,
             )
 
             logger.info("Setting model export to OpenVINO")
             callbacks.append(
-                OpenVINOCallback(
+                ExportCallback(
                     input_size=config.model.input_size,
-                    dirpath=os.path.join(config.project.path, "openvino"),
+                    dirpath=config.project.path,
                     filename="model",
+                    export_mode="openvino",
                 )
             )
+        elif "onnx" in config.optimization.export_mode:
+            from .export import (  # pylint: disable=import-outside-toplevel
+                ExportCallback,
+            )
+
+            logger.info("Setting model export to ONNX")
+            callbacks.append(
+                ExportCallback(
+                    input_size=config.model.input_size,
+                    dirpath=config.project.path,
+                    filename="model",
+                    export_mode="onnx",
+                )
+            )
+        else:
+            logger.info("Export option not found")
 
     # Add callback to log graph to loggers
     if config.logging.log_graph not in [None, False]:
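Condensed, the dispatch that `get_callbacks` now performs looks roughly like this. A sketch only: a `SimpleNamespace` stands in for the parsed config, and the two substring checks of the real code are collapsed into one membership test:

```python
# Sketch of the new export dispatch in get_callbacks (config is a stand-in;
# the real code checks "openvino"/"onnx" substring membership per branch).
from types import SimpleNamespace

from anomalib.utils.callbacks.export import ExportCallback

config = SimpleNamespace(
    optimization=SimpleNamespace(export_mode="onnx"),  # "", "onnx", or "openvino"
    model=SimpleNamespace(input_size=(256, 256)),
    project=SimpleNamespace(path="./results"),
)

callbacks = []
if config.optimization.export_mode in ("onnx", "openvino"):
    callbacks.append(
        ExportCallback(
            input_size=config.model.input_size,
            dirpath=config.project.path,
            filename="model",
            export_mode=config.optimization.export_mode,
        )
    )
else:
    print("Export option not found")  # mirrors the logger.info fallback above
```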
anomalib/utils/callbacks/export.py (renamed from anomalib/utils/callbacks/openvino.py)
@@ -17,7 +17,7 @@


 @CALLBACK_REGISTRY
-class OpenVINOCallback(Callback):
+class ExportCallback(Callback):
     """Callback to compress a trained model.
 
     Model is first exported to ``.onnx`` format, and then converted to OpenVINO IR.
@@ -28,23 +28,25 @@ class OpenVINOCallback(Callback):
         filename (str): Name of output model
     """
 
-    def __init__(self, input_size: Tuple[int, int], dirpath: str, filename: str):
+    def __init__(self, input_size: Tuple[int, int], dirpath: str, filename: str, export_mode: str):
         self.input_size = input_size
         self.dirpath = dirpath
         self.filename = filename
+        self.export_mode = export_mode
 
     def on_train_end(self, trainer, pl_module: AnomalyModule) -> None:  # pylint: disable=W0613
         """Call when the train ends.
 
         Converts the model to ``onnx`` format and then calls OpenVINO's model optimizer to get the
         ``.xml`` and ``.bin`` IR files.
         """
-        logger.info("Exporting the model to OpenVINO")
+        logger.info("Exporting the model")
         os.makedirs(self.dirpath, exist_ok=True)
         onnx_path = os.path.join(self.dirpath, self.filename + ".onnx")
         export_convert(
             model=pl_module,
             input_size=self.input_size,
             onnx_path=onnx_path,
             export_path=self.dirpath,
+            export_mode=self.export_mode,
         )
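A sketch of wiring the renamed callback into a Lightning Trainer; `model` and `datamodule` are assumed to come from elsewhere, and the dirpath is illustrative (compare the test added below, which does the same with dummy modules):

```python
# Sketch: attaching ExportCallback to a Trainer. The export itself runs in
# on_train_end; model, datamodule, and the dirpath here are assumptions.
import pytorch_lightning as pl

from anomalib.utils.callbacks.export import ExportCallback

export_callback = ExportCallback(
    input_size=(256, 256),
    dirpath="results/export",
    filename="model",
    export_mode="openvino",  # or "onnx" to stop after the .onnx export
)
trainer = pl.Trainer(max_epochs=1, callbacks=[export_callback])
trainer.fit(model, datamodule=datamodule)
```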
34 changes: 25 additions & 9 deletions anomalib/utils/cli/cli.py
@@ -91,7 +91,9 @@ def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
         """
         # TODO: https://github.com/openvinotoolkit/anomalib/issues/19
         # TODO: https://github.com/openvinotoolkit/anomalib/issues/20
-        parser.add_argument("--openvino", type=bool, default=False, help="Export to ONNX and OpenVINO IR format.")
+        parser.add_argument(
+            "--export_mode", type=str, default="", help="Select export mode to ONNX or OpenVINO IR format."
+        )
         parser.add_argument("--nncf", type=str, help="Path to NNCF config to enable quantized training.")
 
         # ADD CUSTOM CALLBACKS TO CONFIG
@@ -213,23 +215,37 @@ def __set_callbacks(self) -> None:
         add_visualizer_callback(callbacks, config)
         self.config[subcommand].visualization = config.visualization
 
-        # TODO: https://github.com/openvinotoolkit/anomalib/issues/19
-        if config.openvino and config.nncf:
-            raise ValueError("OpenVINO and NNCF cannot be set simultaneously.")
 
-        # Export to OpenVINO
-        if config.openvino:
-            from anomalib.utils.callbacks.openvino import (  # pylint: disable=import-outside-toplevel
-                OpenVINOCallback,
+        if "openvino" in config.export_mode:
+            from anomalib.utils.callbacks.export import (  # pylint: disable=import-outside-toplevel
+                ExportCallback,
             )
 
             logger.info("Setting model export to OpenVINO")
             callbacks.append(
-                OpenVINOCallback(
+                ExportCallback(
                     input_size=config.data.init_args.image_size,
                     dirpath=os.path.join(config.trainer.default_root_dir, "compressed"),
                     filename="model",
+                    export_mode="openvino",
                 )
             )
+        elif "onnx" in config.export_mode:
+            from anomalib.utils.callbacks.export import (  # pylint: disable=import-outside-toplevel
+                ExportCallback,
+            )
+
+            logger.info("Setting model export to ONNX")
+            callbacks.append(
+                ExportCallback(
+                    input_size=config.data.init_args.image_size,
+                    dirpath=os.path.join(config.trainer.default_root_dir, "compressed"),
+                    filename="model",
+                    export_mode="onnx",
+                )
+            )
+        else:
+            logger.info("No model export")
         if config.nncf:
             if os.path.isfile(config.nncf) and config.nncf.endswith(".yaml"):
                 nncf_module = import_module("anomalib.core.callbacks.nncf_callback")
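The flag change itself is easy to see with a stdlib stand-in for the LightningArgumentParser (illustrative only; the real parser is jsonargparse-based):

```python
# Sketch: the old boolean --openvino flag becomes a string --export_mode flag.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--export_mode", type=str, default="", help="Select export mode: onnx or openvino."
)

args = parser.parse_args(["--export_mode", "openvino"])
assert args.export_mode == "openvino"  # later mapped onto ExportCallback(export_mode=...)
```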
3 changes: 2 additions & 1 deletion tests/pre_merge/deploy/test_inferencer.py
@@ -107,11 +107,12 @@ def test_openvino_inference(self, model_name: str, category: str = "shapes", pat
             input_size=model_config.dataset.image_size,
             onnx_path=export_path / "model.onnx",
             export_path=export_path,
+            export_mode="openvino",
         )
 
         # Test OpenVINO inferencer
         openvino_inferencer = OpenVINOInferencer(
-            model_config, export_path / "model.xml", export_path / "meta_data.json"
+            model_config, export_path / "openvino/model.xml", export_path / "openvino/meta_data.json"
         )
         openvino_dataloader = MockImageLoader(model_config.dataset.image_size, total_count=1)
         for image in openvino_dataloader():
tests/pre_merge/utils/callbacks/export_callback/dummy_config.yml
@@ -18,8 +18,7 @@ project:
   path: ./results
 
 optimization:
-  openvino:
-    apply: true
+  export_mode: "openvino"
 
 trainer:
   accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
55 changes: 55 additions & 0 deletions tests/pre_merge/utils/callbacks/export_callback/test_export.py
@@ -0,0 +1,55 @@
+import os
+import tempfile
+
+import pytest
+import pytorch_lightning as pl
+from pytorch_lightning.callbacks.early_stopping import EarlyStopping
+
+from anomalib.utils.callbacks.export import ExportCallback
+from tests.helpers.config import get_test_configurable_parameters
+from tests.pre_merge.utils.callbacks.export_callback.dummy_lightning_model import (
+    DummyLightningModule,
+    FakeDataModule,
+)
+
+
+@pytest.mark.parametrize(
+    "export_mode",
+    ["openvino", "onnx"],
+)
+def test_export_model_callback(export_mode):
+    """Tests if an optimized model is created."""
+
+    config = get_test_configurable_parameters(
+        config_path="tests/pre_merge/utils/callbacks/export_callback/dummy_config.yml"
+    )
+
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        config.project.path = tmp_dir
+        model = DummyLightningModule(hparams=config)
+        model.callbacks = [
+            ExportCallback(
+                input_size=config.model.input_size,
+                dirpath=os.path.join(tmp_dir),
+                filename="model",
+                export_mode=export_mode,
+            ),
+            EarlyStopping(monitor=config.model.metric),
+        ]
+        datamodule = FakeDataModule()
+        trainer = pl.Trainer(
+            gpus=1,
+            callbacks=model.callbacks,
+            logger=False,
+            checkpoint_callback=False,
+            max_epochs=1,
+            val_check_interval=3,
+        )
+        trainer.fit(model, datamodule=datamodule)
+
+        if "openvino" in export_mode:
+            assert os.path.exists(os.path.join(tmp_dir, "openvino/model.bin")), "Failed to generate OpenVINO model"
+        elif "onnx" in export_mode:
+            assert os.path.exists(os.path.join(tmp_dir, "model.onnx")), "Failed to generate ONNX model"
+        else:
+            pytest.fail(f"Unknown export_mode {export_mode}")

This file was deleted.
