Update for nncf_task (#145)
* Update for nncf_task

* linter

* update nncf commit

* Rename CompressionCallback to OpenVINOCallback

* struct

* rename compression to openvino in config

* rm
AlexanderDokuchaev committed Mar 22, 2022
1 parent 539ee6b commit 834d45a
Showing 18 changed files with 358 additions and 220 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -126,11 +126,11 @@ python tools/inference.py \
--image_path datasets/MVTec/bottle/test/broken_large/000.png
```

If you want to run the OpenVINO model, ensure that `apply` is set to `True` under `compression` in the respective model's `config.yaml`.
If you want to run the OpenVINO model, ensure that `apply` is set to `True` under `openvino` in the respective model's `config.yaml`.

```yaml
optimization:
  compression:
  openvino:
    apply: true
```

4 changes: 4 additions & 0 deletions anomalib/models/__init__.py
@@ -23,6 +23,10 @@

from anomalib.models.components import AnomalyModule

# TODO(AlexanderDokuchaev): Workaround for wrapping by NNCF.
# `spatial_softmax2d` cannot be wrapped when the model is loaded via import_module.
from anomalib.models.padim.model import PadimLightning # noqa: F401


def get_model(config: Union[DictConfig, ListConfig]) -> AnomalyModule:
"""Load model from the configuration file.
18 changes: 1 addition & 17 deletions anomalib/models/ganomaly/config.yaml
@@ -46,24 +46,8 @@ project:
save_to_csv: false

optimization:
  compression:
  openvino:
    apply: false
  nncf:
    apply: false
    input_info:
      sample_size: null
    compression:
      algorithm: quantization
      initializer:
        range:
          num_init_samples: 256
    update_config:
      init_weights: snapshot.ckpt
  hyperparameter_search:
    parameters:
      lr:
        min: 1e-4
        max: 1e-2

# PL Trainer Args. Don't add extra parameter here.
trainer:
14 changes: 1 addition & 13 deletions anomalib/models/padim/config.yaml
@@ -39,20 +39,8 @@ project:
save_to_csv: false

optimization:
  compression:
  openvino:
    apply: false
  nncf:
    apply: false
    input_info:
      sample_size: [1, 3, 256, 256]
    compression:
      algorithm: quantization
      initializer:
        range:
          num_init_samples: 256
      ignored_scopes: []
    update_config:
      init_weights: snapshot.ckpt

# PL Trainer Args. Don't add extra parameter here.
trainer:
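
Since the NNCF settings no longer ship in the per-model configs, compression has to be re-enabled by hand. A minimal sketch of what that `optimization` block could look like, with illustrative values echoing the removed defaults; the reworked `get_callbacks` below checks `nncf.apply` and passes the `nncf` node to the NNCF callback:

```yaml
optimization:
  openvino:
    apply: false
  nncf:
    apply: true
    input_info:
      sample_size: [1, 3, 256, 256]
    compression:
      algorithm: quantization
      initializer:
        range:
          num_init_samples: 256
```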
3 changes: 2 additions & 1 deletion anomalib/models/padim/model.py
@@ -219,7 +219,8 @@ def smooth_anomaly_map(self, anomaly_map: Tensor) -> Tensor:
"""

kernel_size = 2 * int(4.0 * self.sigma + 0.5) + 1
anomaly_map = gaussian_blur2d(anomaly_map, (kernel_size, kernel_size), sigma=(self.sigma, self.sigma))
sigma = torch.as_tensor(self.sigma).to(anomaly_map.device)
anomaly_map = gaussian_blur2d(anomaly_map, (kernel_size, kernel_size), sigma=(sigma, sigma))

return anomaly_map
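
The smoothing change passes `sigma` as a tensor placed on the anomaly map's device instead of a plain float. A standalone sketch of the same call pattern, with illustrative shapes and sigma, assuming the kornia version in use accepts tensor sigmas as the patched code does:

```python
import torch
from kornia.filters import gaussian_blur2d

# Illustrative values; the patched code takes sigma from the model and the map from the backbone.
sigma_value = 4.0
anomaly_map = torch.rand(1, 1, 256, 256)

kernel_size = 2 * int(4.0 * sigma_value + 0.5) + 1
# sigma is moved onto the anomaly map's device as a tensor, as in the change above.
sigma = torch.as_tensor(sigma_value).to(anomaly_map.device)
smoothed = gaussian_blur2d(anomaly_map, (kernel_size, kernel_size), sigma=(sigma, sigma))
```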

18 changes: 1 addition & 17 deletions anomalib/models/stfpm/config.yaml
@@ -46,24 +46,8 @@ project:
save_to_csv: false

optimization:
  compression:
  openvino:
    apply: false
  nncf:
    apply: false
    input_info:
      sample_size: null
    compression:
      algorithm: quantization
      initializer:
        range:
          num_init_samples: 256
    update_config:
      init_weights: snapshot.ckpt
  hyperparameter_search:
    parameters:
      lr:
        min: 1e-4
        max: 1e-2

# PL Trainer Args. Don't add extra parameter here.
trainer:
30 changes: 15 additions & 15 deletions anomalib/utils/callbacks/__init__.py
@@ -18,19 +18,20 @@
from importlib import import_module
from typing import List, Union

from omegaconf import DictConfig, ListConfig
import yaml
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning.callbacks import Callback, ModelCheckpoint

from .cdf_normalization import CdfNormalizationCallback
from .compress import CompressModelCallback
from .min_max_normalization import MinMaxNormalizationCallback
from .model_loader import LoadModelCallback
from .openvino import OpenVINOCallback
from .save_to_csv import SaveToCSVCallback
from .timer import TimerCallback
from .visualizer_callback import VisualizerCallback

__all__ = [
"CompressModelCallback",
"OpenVINOCallback",
"LoadModelCallback",
"TimerCallback",
"VisualizerCallback",
@@ -69,10 +69,9 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]:
if "normalization_method" in config.model.keys() and not config.model.normalization_method == "none":
if config.model.normalization_method == "cdf":
if config.model.name in ["padim", "stfpm"]:
if not config.optimization.nncf.apply:
callbacks.append(CdfNormalizationCallback())
else:
if "nncf" in config.optimization and config.optimization.nncf.apply:
raise NotImplementedError("CDF Score Normalization is currently not compatible with NNCF.")
callbacks.append(CdfNormalizationCallback())
else:
raise NotImplementedError("Score Normalization is currently supported for PADIM and STFPM only.")
elif config.model.normalization_method == "min_max":
@@ -84,24 +84,24 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]:
callbacks.append(VisualizerCallback(inputs_are_normalized=not config.model.normalization_method == "none"))

if "optimization" in config.keys():
if config.optimization.nncf.apply:
if "nncf" in config.optimization and config.optimization.nncf.apply:
# NNCF wraps torch's jit which conflicts with kornia's jit calls.
# Hence, nncf is imported only when required
nncf_module = import_module("anomalib.utils.callbacks.nncf_callback")
nncf_module = import_module("anomalib.utils.callbacks.nncf.callback")
nncf_callback = getattr(nncf_module, "NNCFCallback")
nncf_config = yaml.safe_load(OmegaConf.to_yaml(config.optimization.nncf))
callbacks.append(
nncf_callback(
config=config,
dirpath=os.path.join(config.project.path, "compressed"),
filename="compressed_model",
nncf_config=nncf_config,
export_dir=os.path.join(config.project.path, "compressed"),
)
)
if config.optimization.compression.apply:
if "openvino" in config.optimization and config.optimization.openvino.apply:
callbacks.append(
CompressModelCallback(
OpenVINOCallback(
input_size=config.model.input_size,
dirpath=os.path.join(config.project.path, "compressed"),
filename="compressed_model",
dirpath=os.path.join(config.project.path, "openvino"),
filename="openvino_model",
)
)
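
For orientation, a rough sketch of how the two new branches are wired, pulled out of `get_callbacks`; the config values and paths here are placeholders rather than repository defaults, and NNCF must be installed for the lazy import to succeed:

```python
import os
from importlib import import_module

import yaml
from omegaconf import OmegaConf

from anomalib.utils.callbacks import OpenVINOCallback

# Placeholder config; keys mirror what get_callbacks reads, values are illustrative.
config = OmegaConf.create(
    {
        "project": {"path": "./results"},
        "model": {"input_size": [256, 256]},
        "optimization": {
            "openvino": {"apply": True},
            "nncf": {
                "apply": True,
                "input_info": {"sample_size": [1, 3, 256, 256]},
                "compression": {"algorithm": "quantization"},
            },
        },
    }
)

callbacks = []

if "nncf" in config.optimization and config.optimization.nncf.apply:
    # NNCF is imported lazily because it wraps torch's jit, which conflicts with kornia's jit calls.
    nncf_callback = getattr(import_module("anomalib.utils.callbacks.nncf.callback"), "NNCFCallback")
    # Same DictConfig -> plain dict conversion used in get_callbacks before building NNCFConfig.
    nncf_config = yaml.safe_load(OmegaConf.to_yaml(config.optimization.nncf))
    callbacks.append(
        nncf_callback(nncf_config=nncf_config, export_dir=os.path.join(config.project.path, "compressed"))
    )

if "openvino" in config.optimization and config.optimization.openvino.apply:
    callbacks.append(
        OpenVINOCallback(
            input_size=config.model.input_size,
            dirpath=os.path.join(config.project.path, "openvino"),
            filename="openvino_model",
        )
    )
```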

15 changes: 15 additions & 0 deletions anomalib/utils/callbacks/nncf/__init__.py
@@ -0,0 +1,15 @@
"""Integration NNCF."""

# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
98 changes: 98 additions & 0 deletions anomalib/utils/callbacks/nncf/callback.py
@@ -0,0 +1,98 @@
"""Callbacks for NNCF optimization."""

# Copyright (C) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.

import os
from typing import Any, Dict, Optional

import pytorch_lightning as pl
from nncf import NNCFConfig
from nncf.api.compression import CompressionAlgorithmController
from nncf.torch import register_default_init_args
from pytorch_lightning import Callback

from anomalib.utils.callbacks.nncf.utils import InitLoader, wrap_nncf_model


class NNCFCallback(Callback):
"""Callback for NNCF compression.
Assumes that the pl module contains a 'model' attribute, which is
the PyTorch module that must be compressed.
Args:
nncf_config (Dict): NNCF configuration dictionary.
export_dir (str): Path where the exported ONNX model and the OpenVINO `xml` and `bin` IR are saved.
If None, the model is not exported.
"""

def __init__(self, nncf_config: Dict, export_dir: Optional[str] = None):
self.export_dir = export_dir
self.nncf_config = NNCFConfig(nncf_config)
self.nncf_ctrl: Optional[CompressionAlgorithmController] = None

# pylint: disable=unused-argument
def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None) -> None:
"""Call when fit or test begins.
Takes the pytorch model and wraps it using the compression controller
so that it is ready for nncf fine-tuning.
"""
if self.nncf_ctrl is not None:
return

init_loader = InitLoader(trainer.datamodule.train_dataloader()) # type: ignore
nncf_config = register_default_init_args(self.nncf_config, init_loader)

self.nncf_ctrl, pl_module.model = wrap_nncf_model(
model=pl_module.model, config=nncf_config, dataloader=trainer.datamodule.train_dataloader() # type: ignore
)

def on_train_batch_start(
self,
trainer: pl.Trainer,
_pl_module: pl.LightningModule,
_batch: Any,
_batch_idx: int,
_unused: Optional[int] = 0,
) -> None:
"""Call when the train batch begins.
Prepare compression method to continue training the model in the next step.
"""
if self.nncf_ctrl:
self.nncf_ctrl.scheduler.step()

def on_train_epoch_start(self, _trainer: pl.Trainer, _pl_module: pl.LightningModule) -> None:
"""Call when the train epoch starts.
Prepare compression method to continue training the model in the next epoch.
"""
if self.nncf_ctrl:
self.nncf_ctrl.scheduler.epoch_step()

def on_train_end(self, _trainer: pl.Trainer, _pl_module: pl.LightningModule) -> None:
"""Call when the train ends.
If an export directory is set and the compression controller exists, exports the ONNX model and uses it to generate the OpenVINO IR.
"""
if self.export_dir is None or self.nncf_ctrl is None:
return

os.makedirs(self.export_dir, exist_ok=True)
onnx_path = os.path.join(self.export_dir, "model_nncf.onnx")
self.nncf_ctrl.export_model(onnx_path)
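# Convert the exported ONNX model to OpenVINO IR with OpenVINO's Model Optimizer (mo).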
optimize_command = "mo --input_model " + onnx_path + " --output_dir " + self.export_dir
os.system(optimize_command)
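
A minimal usage sketch for the new callback, assuming NNCF is installed; the quantization settings mirror the keys that used to live in the model configs, and the model/datamodule setup is left to the usual anomalib entry points:

```python
import pytorch_lightning as pl

from anomalib.utils.callbacks.nncf.callback import NNCFCallback

# Placeholder NNCF configuration; the keys mirror those previously shipped in the model configs.
nncf_config = {
    "input_info": {"sample_size": [1, 3, 256, 256]},
    "compression": {"algorithm": "quantization"},
}

# export_dir is where the ONNX model and the OpenVINO IR are written after training.
nncf_callback = NNCFCallback(nncf_config=nncf_config, export_dir="results/compressed")

# model and datamodule come from the usual anomalib entry points (get_model / get_datamodule).
trainer = pl.Trainer(max_epochs=1, callbacks=[nncf_callback])
# trainer.fit(model, datamodule=datamodule)
```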