From 60887909d556a07686297f62018d1d594dcfc453 Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Mon, 29 Nov 2021 18:27:04 +0100 Subject: [PATCH] Initial docs string (#9) * Initial round of docstring refactoring * Add commends to tox * Fix issues with input_size Co-authored-by: Ashwin Vaidya --- .pre-commit-config.yaml | 11 ++ anomalib/__init__.py | 4 +- anomalib/config/__init__.py | 4 +- anomalib/config/config.py | 30 ++- anomalib/core/__init__.py | 4 +- anomalib/core/callbacks/__init__.py | 12 +- anomalib/core/callbacks/compress.py | 21 +- anomalib/core/callbacks/model_loader.py | 2 +- anomalib/core/callbacks/nncf_callback.py | 36 ++-- anomalib/core/callbacks/save_to_csv.py | 17 +- anomalib/core/callbacks/timer.py | 10 +- .../core/callbacks/visualizer_callback.py | 17 +- anomalib/core/model/__init__.py | 4 +- anomalib/core/model/anomaly_module.py | 43 ++--- anomalib/core/model/dynamic_module.py | 28 ++- anomalib/core/model/feature_extractor.py | 26 +-- anomalib/core/model/inference.py | 71 +++---- anomalib/core/model/kde.py | 23 +-- anomalib/core/model/multi_variate_gaussian.py | 19 +- anomalib/core/model/pca.py | 33 ++-- anomalib/core/results/__init__.py | 6 +- anomalib/core/results/results.py | 40 ++-- anomalib/datasets/__init__.py | 8 +- anomalib/datasets/anomaly_dataset.py | 127 ++++++------ anomalib/datasets/mvtec.py | 79 ++++---- anomalib/datasets/parser.py | 23 ++- anomalib/datasets/tiler.py | 57 ++---- anomalib/datasets/transforms/__init__.py | 4 +- anomalib/datasets/transforms/pre_process.py | 52 +++-- anomalib/datasets/utils.py | 51 +++-- anomalib/loggers/__init__.py | 12 +- anomalib/loggers/base.py | 6 +- anomalib/loggers/tensorboard.py | 25 ++- anomalib/models/__init__.py | 5 +- anomalib/models/dfkde/__init__.py | 4 +- anomalib/models/dfkde/model.py | 23 +-- anomalib/models/dfkde/normality_model.py | 46 ++--- anomalib/models/dfm/__init__.py | 4 +- anomalib/models/dfm/dfm_model.py | 40 ++-- anomalib/models/dfm/model.py | 23 +-- anomalib/models/padim/__init__.py | 4 +- anomalib/models/padim/model.py | 105 +++++----- anomalib/models/patchcore/__init__.py | 4 +- anomalib/models/patchcore/model.py | 61 +++--- anomalib/models/patchcore/utils/__init__.py | 1 + .../patchcore/utils/sampling/__init__.py | 4 +- .../utils/sampling/k_center_greedy.py | 34 ++-- .../utils/sampling/nearest_neighbors.py | 16 +- .../utils/sampling/random_projection.py | 31 ++- anomalib/models/stfpm/__init__.py | 4 +- anomalib/models/stfpm/model.py | 108 +++++------ anomalib/utils/__init__.py | 4 +- anomalib/utils/download_progress_bar.py | 181 +++++++++--------- anomalib/utils/metrics.py | 10 +- anomalib/utils/post_process.py | 18 +- anomalib/utils/visualizer.py | 31 +-- setup.py | 4 +- tests/__init__.py | 4 +- tests/config/__init__.py | 4 +- tests/config/test_config.py | 16 +- .../dummy_lightning_model.py | 5 +- .../compress_callback/test_compress.py | 6 +- .../dummy_lightning_model.py | 5 +- .../visualizer_callback/test_visualizer.py | 2 +- tests/datasets/test_anomaly_dataset.py | 8 +- tests/datasets/test_dataset.py | 59 ++---- tests/datasets/test_tiler.py | 35 +--- tests/datasets/test_transforms.py | 28 +-- tests/helpers/dataset.py | 18 +- tests/helpers/detection.py | 13 +- tests/helpers/shapes.py | 13 +- tests/loggers/__init__.py | 4 +- tests/loggers/test_get_logger.py | 6 +- tests/models/__init__.py | 4 +- tests/models/test_model.py | 17 +- tests/utils/__init__.py | 4 +- tests/utils/test_download_progress_bar.py | 6 +- tools/inference.py | 15 +- tools/test.py | 15 +- tools/train.py | 17 +- tox.ini | 26 
++- 81 files changed, 836 insertions(+), 1164 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4004e51b76..56aa954bc9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,3 +57,14 @@ repos: args: [--config-file=tox.ini] additional_dependencies: [types-PyYAML] exclude: "tests/" + + - repo: https://github.com/PyCQA/pydocstyle + rev: 6.1.1 + hooks: + - id: pydocstyle + name: pydocstyle + entry: pydocstyle + language: python + types: [python] + args: [--config=tox.ini] + exclude: "tests|docs" diff --git a/anomalib/__init__.py b/anomalib/__init__.py index 825f1a89db..e0a1616d5d 100644 --- a/anomalib/__init__.py +++ b/anomalib/__init__.py @@ -1,6 +1,4 @@ -""" -Anomalib library for research and benchmarking -""" +"""Anomalib library for research and benchmarking.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/config/__init__.py b/anomalib/config/__init__.py index 47309d8cc6..628ad792b6 100644 --- a/anomalib/config/__init__.py +++ b/anomalib/config/__init__.py @@ -1,6 +1,4 @@ -""" -Utilities to get configurable parameters -""" +"""Utilities to get configurable parameters.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/config/config.py b/anomalib/config/config.py index 3655f64ab8..db409571f1 100644 --- a/anomalib/config/config.py +++ b/anomalib/config/config.py @@ -1,6 +1,4 @@ -""" -Get configurable parameters -""" +"""Get configurable parameters.""" # Copyright (C) 2020 Intel Corporation # @@ -28,16 +26,16 @@ def update_input_size_config(config: Union[DictConfig, ListConfig]) -> Union[DictConfig, ListConfig]: - """ - Convert integer image size parameters into tuples, calculate the effective input size based on image size and crop - size, and set tiling stride if undefined. + """Update config with image size as tuple, effective input size and tiling stride. + + Convert integer image size parameters into tuples, calculate the effective input size based on image size + and crop size, and set tiling stride if undefined. Args: - config: Dictconfig: Configurable parameters object + config (Union[DictConfig, ListConfig]): Configurable parameters object Returns: Configurable parameters with updated values - """ # handle image size if isinstance(config.dataset.image_size, int): @@ -55,8 +53,7 @@ def update_input_size_config(config: Union[DictConfig, ListConfig]) -> Union[Dic def update_nncf_config(config: Union[DictConfig, ListConfig]) -> Union[DictConfig, ListConfig]: - """ - Set the NNCF input size based on the value of the crop_size parameter in the configurable parameters object. + """Set the NNCF input size based on the value of the crop_size parameter in the configurable parameters object. Args: config: Dictconfig: Configurable parameters of the current run. @@ -76,8 +73,9 @@ def update_nncf_config(config: Union[DictConfig, ListConfi def update_multi_gpu_training_config(config: Union[DictConfig, ListConfig]) -> Union[DictConfig, ListConfig]: - """Updates the config to change learning rate based on number of gpus assigned - and ensures only ddp accelerator is used + """Update the config to change the learning rate based on the number of GPUs assigned. + + Current behaviour is to ensure only ddp accelerator is used. 
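For orientation, a hedged sketch of how update_input_size_config is meant to be called; everything beyond dataset.image_size in this toy config is an assumption, since the full schema is not shown in this hunk:
>>> from omegaconf import OmegaConf
>>> from anomalib.config.config import update_input_size_config
>>> config = OmegaConf.create({"dataset": {"image_size": 256}, "model": {}})  # hypothetical minimal config
>>> config = update_input_size_config(config)  # the integer 256 is expanded to a (256, 256) tuple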
Args: config (Union[DictConfig, ListConfig]): Configurable parameters for the current run @@ -115,9 +113,7 @@ def update_multi_gpu_training_config(config: Union[DictConfig, ListConfig]) -> U def update_device_config(config: Union[DictConfig, ListConfig], openvino: bool) -> Union[DictConfig, ListConfig]: - """ - Update XPU Device Config - This function ensures devices are configured correctly by the user. + """Update XPU Device Config. This function ensures devices are configured correctly by the user. Args: config (Union[DictConfig, ListConfig]): Input config @@ -150,8 +146,7 @@ def get_configurable_parameters( config_filename: Optional[str] = "config", config_file_extension: Optional[str] = "yaml", ) -> Union[DictConfig, ListConfig]: - """ - Get configurable parameters + """Get configurable parameters. Args: model_name: Optional[str]: (Default value = None) @@ -163,7 +158,6 @@ def get_configurable_parameters( Returns: Configurable parameters in DictConfig object. - """ if model_name is None and model_config_path is None: raise ValueError( diff --git a/anomalib/core/__init__.py b/anomalib/core/__init__.py index 064449feb1..7adddf907b 100644 --- a/anomalib/core/__init__.py +++ b/anomalib/core/__init__.py @@ -1,6 +1,4 @@ -""" -This module holds common components such as callbacks, custom modules and utils -""" +"""This module holds common components such as callbacks, custom modules and utils.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/core/callbacks/__init__.py b/anomalib/core/callbacks/__init__.py index 0bf732fb28..a6eab0e54e 100644 --- a/anomalib/core/callbacks/__init__.py +++ b/anomalib/core/callbacks/__init__.py @@ -1,4 +1,4 @@ -"""Callbacks for Anomalib models""" +"""Callbacks for Anomalib models.""" import os from importlib import import_module @@ -23,13 +23,13 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]: - """Return base callbacks for all the lightning models + """Return base callbacks for all the lightning models. Args: - config (DictConfig): model config + config (DictConfig): Model config - Returns: - (List[Callback]): List of callbacks + Returns: + (List[Callback]): List of callbacks. """ callbacks: List[Callback] = [] @@ -65,7 +65,7 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]: if config.optimization.compression.apply: callbacks.append( CompressModelCallback( - config=config, + input_size=config.model.input_size, dirpath=os.path.join(config.project.path, "compressed"), filename="compressed_model", ) ) diff --git a/anomalib/core/callbacks/compress.py b/anomalib/core/callbacks/compress.py index 68ea6db7c9..81c2a87443 100644 --- a/anomalib/core/callbacks/compress.py +++ b/anomalib/core/callbacks/compress.py @@ -1,27 +1,32 @@ """Callback that compresses a trained model by first exporting to .onnx format, and then converting to OpenVINO IR.""" import os -from typing import Union +from typing import Tuple import torch -from omegaconf import DictConfig, ListConfig from pytorch_lightning import Callback, LightningModule class CompressModelCallback(Callback): - """ - Callback that compresses a trained model by first exporting to .onnx format, and then converting to OpenVINO IR. + """Callback to compress a trained model. + + Model is first exported to .onnx format, and then converted to OpenVINO IR. 
+ + Args: + input_size (Tuple[int, int]): Tuple of image height, width + dirpath (str): Path for model output + filename (str): Name of output model """ - def __init__(self, config: Union[ListConfig, DictConfig], dirpath: str, filename: str): - self.config = config + def __init__(self, input_size: Tuple[int, int], dirpath: str, filename: str): + self.input_size = input_size self.dirpath = dirpath self.filename = filename def on_train_end(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when the train ends.""" + """Call when the train ends.""" os.makedirs(self.dirpath, exist_ok=True) onnx_path = os.path.join(self.dirpath, self.filename + ".onnx") - height, width = self.config.model.input_size + height, width = self.input_size torch.onnx.export( pl_module.model, torch.zeros((1, 3, height, width)).to(pl_module.device), onnx_path, opset_version=11 ) diff --git a/anomalib/core/callbacks/model_loader.py b/anomalib/core/callbacks/model_loader.py index b160c9eaa7..4cc89f9cb8 100644 --- a/anomalib/core/callbacks/model_loader.py +++ b/anomalib/core/callbacks/model_loader.py @@ -10,5 +10,5 @@ def __init__(self, weights_path): self.weights_path = weights_path def on_test_start(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when the test begins.""" + """Call when the test begins.""" pl_module.load_state_dict(torch.load(self.weights_path)["state_dict"]) diff --git a/anomalib/core/callbacks/nncf_callback.py b/anomalib/core/callbacks/nncf_callback.py index 7cd75c3823..65366d7e5c 100644 --- a/anomalib/core/callbacks/nncf_callback.py +++ b/anomalib/core/callbacks/nncf_callback.py @@ -1,6 +1,4 @@ -""" -NNCF Callback -""" +"""NNCF Callback.""" import os from typing import Any, Dict, Iterator, Optional, Tuple @@ -19,16 +17,7 @@ def criterion_fn(outputs, criterion): - """ - criterion_fn [summary] - - Args: - outputs ([type]): [description] - criterion ([type]): [description] - - Returns: - [type]: [description] - """ + """Calls the criterion function on outputs.""" return criterion(outputs) @@ -40,15 +29,18 @@ def __init__(self, data_loader: DataLoader): self._data_loader_iter: Iterator def __iter__(self): + """Create iterator for dataloader.""" self._data_loader_iter = iter(self._data_loader) return self def __next__(self) -> Any: + """Return next item from dataloader iterator.""" loaded_item = next(self._data_loader_iter) return loaded_item["image"] def get_inputs(self, dataloader_output) -> Tuple[Tuple, Dict]: - """ + """Get input to model. + Returns: (dataloader_output,), {}: Tuple[Tuple, Dict]: The current model call to be made during the initialization process @@ -56,9 +48,7 @@ def get_inputs(self, dataloader_output) -> Tuple[Tuple, Dict]: return (dataloader_output,), {} def get_target(self, _): - """ - Parses the generic data loader output and returns a structure to be used as - ground truth in the loss criterion. + """Return structure for ground truth in loss criterion based on dataloader output. Returns: None @@ -69,8 +59,8 @@ def get_target(self, _): class NNCFCallback(Callback): """Callback for NNCF compression. - Assumes that the pl module contains a 'model' attribute, which is the PyTorch module - that must be compressed. + Assumes that the pl module contains a 'model' attribute, which is + the PyTorch module that must be compressed. 
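A usage sketch of the refactored callbacks, using the new CompressModelCallback signature from this diff; the directory path is a placeholder:
>>> from pytorch_lightning import Trainer
>>> from anomalib.core.callbacks.compress import CompressModelCallback
>>> compress = CompressModelCallback(input_size=(256, 256), dirpath="./compressed", filename="compressed_model")
>>> trainer = Trainer(callbacks=[compress])
On train end the callback exports the module to ONNX and then converts it to OpenVINO IR, as shown in on_train_end above.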
""" def __init__(self, config, dirpath, filename): @@ -88,7 +78,7 @@ def __init__(self, config, dirpath, filename): self.compression_scheduler: CompressionScheduler def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str] = None) -> None: - """Called when fit or test begins""" + """Call when fit or test begins.""" if self.comp_ctrl is None: init_loader = InitLoader(self.train_loader) nncf_config = register_default_init_args( @@ -101,13 +91,13 @@ def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str] def on_train_batch_start( self, trainer, _pl_module: pl.LightningModule, _batch: Any, _batch_idx: int, _dataloader_idx: int ) -> None: - """Called when the train batch begins.""" + """Call when the train batch begins.""" self.compression_scheduler.step() if self.comp_ctrl is not None: trainer.model.loss_val = self.comp_ctrl.loss() def on_train_end(self, _trainer, _pl_module: pl.LightningModule) -> None: - """Called when the train ends.""" + """Call when the train ends.""" os.makedirs(self.dirpath, exist_ok=True) onnx_path = os.path.join(self.dirpath, self.filename + ".onnx") if self.comp_ctrl is not None: @@ -118,5 +108,5 @@ def on_train_end(self, _trainer, _pl_module: pl.LightningModule) -> None: def on_train_epoch_end( self, _trainer: pl.Trainer, _pl_module: pl.LightningModule, _unused: Optional[Any] = None ) -> None: - """Called when the train epoch ends.""" + """Call when the train epoch ends.""" self.compression_scheduler.epoch_step() diff --git a/anomalib/core/callbacks/save_to_csv.py b/anomalib/core/callbacks/save_to_csv.py index 9213dadac9..a670dba50c 100644 --- a/anomalib/core/callbacks/save_to_csv.py +++ b/anomalib/core/callbacks/save_to_csv.py @@ -1,6 +1,4 @@ -""" -SaveToCSV Callback -""" +"""Callback to save metrics to CSV.""" from pathlib import Path import numpy as np @@ -11,21 +9,22 @@ class SaveToCSVCallback(Callback): - """ - Callback that saves the inference results of a model. The callback generates a csv file that saves different - performance metrics and results. + """Callback that saves the inference results of a model. + + The callback generates a csv file that saves different performance + metrics and results. """ def __init__(self): - """SaveToCSV callback""" + """Callback to save metrics to CSV.""" def on_test_epoch_end(self, _trainer: Trainer, pl_module: AnomalyModule) -> None: - """Save Results at the end of training + """Save Results at the end of training. + Args: _trainer (Trainer): Pytorch lightning trainer object (unused) pl_module (LightningModule): Lightning modules derived from BaseAnomalyLightning object. 
""" - results = pl_module.results data_frame = pd.DataFrame( { diff --git a/anomalib/core/callbacks/timer.py b/anomalib/core/callbacks/timer.py index 76fd8b1797..276edbe1ad 100644 --- a/anomalib/core/callbacks/timer.py +++ b/anomalib/core/callbacks/timer.py @@ -1,4 +1,4 @@ -"""Callback to measure training and testing time of a PyTorch Lightning module""" +"""Callback to measure training and testing time of a PyTorch Lightning module.""" import time from pytorch_lightning import Callback, LightningModule @@ -11,17 +11,17 @@ def __init__(self): self.start = None def on_fit_start(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when fit begins""" + """Call when fit begins.""" self.start = time.time() def on_fit_end(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when fit ends""" + """Call when fit ends.""" print(f"Training took {time.time() - self.start} seconds") def on_test_start(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when the test begins.""" + """Call when the test begins.""" self.start = time.time() def on_test_end(self, trainer, pl_module: LightningModule) -> None: # pylint: disable=W0613 - """Called when the test ends.""" + """Call when the test ends.""" print(f"Testing took {time.time() - self.start} seconds.") diff --git a/anomalib/core/callbacks/visualizer_callback.py b/anomalib/core/callbacks/visualizer_callback.py index 91494beaef..10b56c672e 100644 --- a/anomalib/core/callbacks/visualizer_callback.py +++ b/anomalib/core/callbacks/visualizer_callback.py @@ -1,6 +1,4 @@ -""" -Visualizer Callback -""" +"""Visualizer Callback.""" from pathlib import Path from warnings import warn @@ -18,16 +16,17 @@ class VisualizerCallback(Callback): - """ - Callback that visualizes the inference results of a model. The callback generates a figure showing the original - image, the ground truth segmentation mask, the predicted error heat map, and the predicted segmentation mask. + """Callback that visualizes the inference results of a model. + + The callback generates a figure showing the original image, the ground truth segmentation mask, + the predicted error heat map, and the predicted segmentation mask. To save the images to the filesystem, add the 'local' keyword to the project.log_images_to parameter in the config.yaml file. . """ def __init__(self): - """Visualizer callback""" + """Visualizer callback.""" def _add_images( self, @@ -59,13 +58,13 @@ def _add_images( visualizer.save(Path(module.hparams.project.path) / "images" / filename.parent.name / filename.name) def on_test_epoch_end(self, _trainer: Trainer, pl_module: LightningModule) -> None: - """Log images at the end of training + """Log images at the end of training. + Args: _trainer (Trainer): Pytorch lightning trainer object (unused) pl_module (LightningModule): Lightning modules derived from BaseAnomalyLightning object as currently only they support logging images. 
""" - if isinstance(pl_module.results, SegmentationResults): results = pl_module.results else: diff --git a/anomalib/core/model/__init__.py b/anomalib/core/model/__init__.py index 172c46fcc9..cf27494ddb 100644 --- a/anomalib/core/model/__init__.py +++ b/anomalib/core/model/__init__.py @@ -1,6 +1,4 @@ -""" -Anomalib Core Model Entities -""" +"""Anomalib Core Model Entities.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/core/model/anomaly_module.py b/anomalib/core/model/anomaly_module.py index 4deab5e658..98f6e3492c 100644 --- a/anomalib/core/model/anomaly_module.py +++ b/anomalib/core/model/anomaly_module.py @@ -1,6 +1,4 @@ -""" -Base Anomaly Module for Training Task -""" +"""Base Anomaly Module for Training Task.""" # Copyright (C) 2020 Intel Corporation # @@ -29,8 +27,7 @@ class AnomalyModule(pl.LightningModule): - """ - AnomalyModule to train, validate, predict and test images. + """AnomalyModule to train, validate, predict and test images. Args: params (Union[DictConfig, ListConfig]): Configuration @@ -58,8 +55,7 @@ def __init__(self, params: Union[DictConfig, ListConfig]): raise NotImplementedError("Only Classification and Segmentation tasks are supported in this version.") def forward(self, batch): # pylint: disable=arguments-differ - """ - Forward-pass input tensor to the module + """Forward-pass input tensor to the module. Args: batch (Tensor): Input Tensor @@ -70,8 +66,8 @@ def forward(self, batch): # pylint: disable=arguments-differ return self.model(batch) def predict_step(self, batch, batch_idx, _): # pylint: disable=arguments-differ, signature-differs - """ - Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. + """Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. + By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`. Override to add any processing logic. @@ -86,8 +82,7 @@ def predict_step(self, batch, batch_idx, _): # pylint: disable=arguments-differ return self._post_process(self.validation_step(batch, batch_idx), predict_labels=True) def test_step(self, batch, _): # pylint: disable=arguments-differ - """ - Calls validation_step for anomaly map/score calculation. + """Calls validation_step for anomaly map/score calculation. Args: batch: Input batch @@ -96,30 +91,22 @@ def test_step(self, batch, _): # pylint: disable=arguments-differ Returns: Dictionary containing images, features, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. - """ return self.validation_step(batch, _) def validation_step_end(self, val_step_outputs): # pylint: disable=arguments-differ - """ - Called at the end of each validation step. - """ + """Called at the end of each validation step.""" return self._post_process(val_step_outputs) def test_step_end(self, test_step_outputs): # pylint: disable=arguments-differ - """ - Called at the end of each validation step. - """ + """Called at the end of each validation step.""" return self._post_process(test_step_outputs) def validation_epoch_end(self, outputs): - """ - Compute image-level performance metrics + """Compute image-level performance metrics. Args: outputs: Batch of outputs from the validation step - - """ self.results.store_outputs(outputs) if self.hparams.model.threshold.adaptive: @@ -129,21 +116,17 @@ def validation_epoch_end(self, outputs): self._log_metrics() def test_epoch_end(self, outputs): - """ - Compute and save anomaly scores of the test set. 
+ """Compute and save anomaly scores of the test set. Args: outputs: Batch of outputs from the validation step - """ self.results.store_outputs(outputs) self.results.evaluate(self.threshold.item()) self._log_metrics() def _post_process(self, outputs, predict_labels=False): - """ - Compute labels based on model predictions. - """ + """Compute labels based on model predictions.""" if "pred_scores" not in outputs and "anomaly_maps" in outputs: outputs["pred_scores"] = ( outputs["anomaly_maps"].reshape(outputs["anomaly_maps"].shape[0], -1).max(axis=1).values @@ -153,8 +136,6 @@ def _post_process(self, outputs, predict_labels=False): return outputs def _log_metrics(self): - """ - Log computed performance metrics - """ + """Log computed performance metrics.""" for name, value in self.results.performance.items(): self.log(name=name, value=value, on_epoch=True, prog_bar=True) diff --git a/anomalib/core/model/dynamic_module.py b/anomalib/core/model/dynamic_module.py index 45a2b0c81a..fb71672ba0 100644 --- a/anomalib/core/model/dynamic_module.py +++ b/anomalib/core/model/dynamic_module.py @@ -1,6 +1,4 @@ -""" -Dynamic Buffer Module -""" +"""Dynamic Buffer Module.""" # Copyright (C) 2020 Intel Corporation # @@ -22,19 +20,19 @@ class DynamicBufferModule(ABC, nn.Module): - """ - Torch module that allows loading variables from the state dict even in the case of shape mismatch. - """ + """Torch module that allows loading variables from the state dict even in the case of shape mismatch.""" def get_tensor_attribute(self, attribute_name: str) -> Tensor: - """ - get_tensor [summary] + """Get attribute of the tensor given the name. Args: - attribute_name (str): [description] + attribute_name (str): Name of the tensor + + Raises: + ValueError: `attribute_name` is not a torch Tensor Returns: - Tensor: [description] + Tensor: Tensor attribute """ attribute = self.__getattr__(attribute_name) if isinstance(attribute, Tensor): @@ -43,14 +41,14 @@ def get_tensor_attribute(self, attribute_name: str) -> Tensor: raise ValueError(f"Attribute with name '{attribute_name}' is not a torch Tensor") def _load_from_state_dict(self, state_dict: dict, prefix: str, *args): - """ - Overrides method from parent class. Resizes the local buffers to match those stored in the state dict. + """Resizes the local buffers to match those stored in the state dict. + + Overrides method from parent class. Args: - state_dict: dict: State dictionary containing weights - prefix: str: Prefix of the weight file. + state_dict (dict): State dictionary containing weights + prefix (str): Prefix of the weight file. *args: - """ persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_buffers = {k: v for k, v in persistent_buffers.items() if v is not None} diff --git a/anomalib/core/model/feature_extractor.py b/anomalib/core/model/feature_extractor.py index 92b60fe43e..6453545ead 100644 --- a/anomalib/core/model/feature_extractor.py +++ b/anomalib/core/model/feature_extractor.py @@ -1,5 +1,5 @@ -""" -Feature Extractor +"""Feature Extractor. + This script extracts features from a CNN network """ @@ -24,11 +24,9 @@ class FeatureExtractor(nn.Module): - """ - Extract features from a CNN + """Extract features from a CNN. 
Example: - >>> import torch >>> import torchvision >>> from anomalib.core.model.feature_extractor import FeatureExtractor @@ -54,40 +52,34 @@ def __init__(self, backbone: nn.Module, layers: Iterable[str]): layer.register_forward_hook(self.get_features(layer_id)) def get_features(self, layer_id: str) -> Callable: - """ - Get layer features. + """Get layer features. Args: layer_id: str: Layer ID Returns: Layer features - """ def hook(_, __, output): - """ - Hook to extract features via a forward-pass. + """Hook to extract features via a forward-pass. Args: output: Feature map collected after the forward-pass. - """ self._features[layer_id] = output return hook - def forward(self, tensor: Tensor) -> Dict[str, Tensor]: - """ - Forward-pass input tensor into the CNN + def forward(self, input_tensor: Tensor) -> Dict[str, Tensor]: + """Forward-pass input tensor into the CNN. Args: - tensor: Tensor: + input_tensor (Tensor): Input tensor Returns: Feature map extracted from the CNN - """ self._features = {layer: torch.empty(0) for layer in self.layers} - _ = self.backbone(tensor) + _ = self.backbone(input_tensor) return self._features diff --git a/anomalib/core/model/inference.py b/anomalib/core/model/inference.py index faf764b360..48051257aa 100644 --- a/anomalib/core/model/inference.py +++ b/anomalib/core/model/inference.py @@ -1,7 +1,4 @@ -""" -This module contains inference-related abstract class -and its Torch and OpenVINO implementations. -""" +"""This module contains inference-related abstract class and its Torch and OpenVINO implementations.""" # Copyright (C) 2020 Intel Corporation # @@ -36,43 +33,35 @@ class Inferencer(ABC): - """ - Abstract class for the inference. + """Abstract class for the inference. + This is used by both Torch and OpenVINO inference. """ @abstractmethod def load_model(self, path: Union[str, Path]): - """ - Load Model - """ + """Load Model.""" raise NotImplementedError @abstractmethod def pre_process(self, image: np.ndarray) -> Union[np.ndarray, Tensor]: - """ - Pre-process - """ + """Pre-process.""" raise NotImplementedError @abstractmethod def forward(self, image: Union[np.ndarray, Tensor]) -> Union[np.ndarray, Tensor]: - """ - Forward-Pass input to model - """ + """Forward-Pass input to model.""" raise NotImplementedError @abstractmethod def post_process(self, predictions: Union[np.ndarray, Tensor], meta_data: Optional[Dict]) -> np.ndarray: - """ - Post-Process - """ + """Post-Process.""" raise NotImplementedError def predict(self, image: Union[str, np.ndarray], superimpose: bool = True) -> np.ndarray: - """ - Perform a prediction for a given input image. The main workflow is - (i) pre-processing, (ii) forward-pass, (iii) post-process. + """Perform a prediction for a given input image. + + The main workflow is (i) pre-processing, (ii) forward-pass, (iii) post-process. Args: image (Union[str, np.ndarray]): Input image whose output is to be predicted. @@ -85,7 +74,6 @@ def predict(self, image: Union[str, np.ndarray], superimpose: bool = True) -> np Returns: np.ndarray: Output predictions to be visualized. """ - if isinstance(image, str): image = read_image(image) @@ -99,12 +87,19 @@ def predict(self, image: Union[str, np.ndarray], superimpose: bool = True) -> np return output def __call__(self, image: np.ndarray) -> np.ndarray: + """Call predict on the Image. 
+ + Args: + image (np.ndarray): Input Image + + Returns: + np.ndarray: Output predictions to be visualized + """ return self.predict(image) class TorchInferencer(Inferencer): - """ - PyTorch implementation for the inference. + """PyTorch implementation for the inference. Args: config (DictConfig): Configurable parameters that are used @@ -120,8 +115,7 @@ def __init__(self, config: Union[DictConfig, ListConfig], path: Union[str, Path, self.model = self.load_model(path) def load_model(self, path: Union[str, Path]) -> nn.Module: - """ - Load the PyTorch model. + """Load the PyTorch model. Args: path (Union[str, Path]): Path to model ckpt file. @@ -135,8 +129,7 @@ def load_model(self, path: Union[str, Path]) -> nn.Module: return model def pre_process(self, image: np.ndarray) -> Tensor: - """ - Pre process the input image by applying transformations. + """Pre process the input image by applying transformations. Args: image (np.ndarray): Input image @@ -153,8 +146,7 @@ def pre_process(self, image: np.ndarray) -> Tensor: return processed_image def forward(self, image: Tensor) -> Tensor: - """ - Forward-Pass input tensor to the model. + """Forward-Pass input tensor to the model. Args: image (Tensor): Input tensor. @@ -165,8 +157,7 @@ def forward(self, image: Tensor) -> Tensor: return self.model(image) def post_process(self, predictions: Tensor, meta_data: Optional[Dict] = None) -> np.ndarray: - """ - Post process the output predictions. + """Post process the output predictions. Args: predictions (Tensor): Raw output predicted by the model. @@ -177,7 +168,6 @@ def post_process(self, predictions: Tensor, meta_data: Optional[Dict] = None) -> Returns: np.ndarray: Post processed predictions that are ready to be visualized. """ - if meta_data is None: meta_data = {} @@ -190,8 +180,7 @@ def post_process(self, predictions: Tensor, meta_data: Optional[Dict] = None) -> class OpenVINOInferencer(Inferencer): - """ - OpenVINO implementation for the inference. + """OpenVINO implementation for the inference. Args: config (DictConfig): Configurable parameters that are used @@ -204,8 +193,7 @@ def __init__(self, config: Union[DictConfig, ListConfig], path: Union[str, Path, self.input_blob, self.output_blob, self.network = self.load_model(path) def load_model(self, path: Union[str, Path, Tuple[bytes, bytes]]): - """ - Load the OpenVINO model. + """Load the OpenVINO model. Args: path (Union[str, Path, Tuple[bytes, bytes]]): Path to the onnx or xml and bin files @@ -240,8 +228,7 @@ def load_model(self, path: Union[str, Path, Tuple[bytes, bytes]]): return input_blob, output_blob, executable_network def pre_process(self, image: np.ndarray) -> np.ndarray: - """ - Pre process the input image by applying transformations. + """Pre process the input image by applying transformations. Args: image (np.ndarray): Input image. @@ -261,8 +248,7 @@ def pre_process(self, image: np.ndarray) -> np.ndarray: return processed_image def forward(self, image: np.ndarray) -> np.ndarray: - """ - Forward-Pass input tensor to the model. + """Forward-Pass input tensor to the model. Args: image (np.ndarray): Input tensor. @@ -273,8 +259,7 @@ def forward(self, image: np.ndarray) -> np.ndarray: return self.network.infer(inputs={self.input_blob: image}) def post_process(self, predictions: np.ndarray, meta_data: Optional[Dict] = None) -> np.ndarray: - """ - Post process the output predictions. + """Post process the output predictions. Args: predictions (np.ndarray): Raw output predicted by the model. 
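To make the predict workflow above concrete, a hedged end-to-end sketch with the Torch inferencer; the config and file paths are placeholders:
>>> from anomalib.config import get_configurable_parameters
>>> from anomalib.core.model.inference import TorchInferencer
>>> config = get_configurable_parameters(model_config_path="anomalib/models/stfpm/config.yaml")  # hypothetical path
>>> inferencer = TorchInferencer(config=config, path="results/weights/model.ckpt")  # hypothetical checkpoint
>>> prediction = inferencer.predict(image="image.png", superimpose=True)
The call runs pre_process, forward and post_process in sequence, and superimposes the anomaly map on the input when requested.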
diff --git a/anomalib/core/model/kde.py b/anomalib/core/model/kde.py index a242ef8ad1..0608f707f7 100644 --- a/anomalib/core/model/kde.py +++ b/anomalib/core/model/kde.py @@ -1,6 +1,4 @@ -""" -Gaussian Kernel Density Estimation -""" +"""Gaussian Kernel Density Estimation.""" # Copyright (C) 2020 Intel Corporation # @@ -25,9 +23,7 @@ class GaussianKDE(DynamicBufferModule): - """ - Gaussian Kernel Density Estimation - """ + """Gaussian Kernel Density Estimation.""" def __init__(self, dataset: Optional[torch.Tensor] = None): super().__init__() @@ -44,14 +40,12 @@ def __init__(self, dataset: Optional[torch.Tensor] = None): self.norm = torch.Tensor() def forward(self, features: torch.Tensor) -> torch.Tensor: - """ - Get the KDE estimates from the feature map. + """Get the KDE estimates from the feature map. Args: features: torch.Tensor: Feature map extracted from the CNN Returns: KDE Estimates - """ features = torch.matmul(features, self.bw_transform) @@ -63,15 +57,14 @@ def forward(self, features: torch.Tensor) -> torch.Tensor: return estimate - def fit(self, dataset: torch.Tensor): - """ - Fit a KDE model to the input dataset. + def fit(self, dataset: torch.Tensor) -> None: + """Fit a KDE model to the input dataset. Args: dataset: torch.Tensor: Input dataset. Returns: - + None """ num_samples, dimension = dataset.shape @@ -96,8 +89,7 @@ def fit(self, dataset: torch.Tensor): @staticmethod def cov(tensor: torch.Tensor, bias: Optional[bool] = False) -> torch.Tensor: - """ - Calculate covariance matrix. + """Calculate covariance matrix. Args: tensor: torch.Tensor: Input tensor from which covariance matrix is computed. @@ -105,7 +97,6 @@ def cov(tensor: torch.Tensor, bias: Optional[bool] = False) -> torch.Tensor: Returns: Output covariance matrix. - """ mean = torch.mean(tensor, dim=1) tensor -= mean[:, None] diff --git a/anomalib/core/model/multi_variate_gaussian.py b/anomalib/core/model/multi_variate_gaussian.py index ecd0c8f5ed..fb5daf5dc4 100644 --- a/anomalib/core/model/multi_variate_gaussian.py +++ b/anomalib/core/model/multi_variate_gaussian.py @@ -1,6 +1,4 @@ -""" -Multi Variate Gaussian Distribution -""" +"""Multi Variate Gaussian Distribution.""" # Copyright (C) 2020 Intel Corporation # @@ -23,9 +21,7 @@ class MultiVariateGaussian(nn.Module): - """ - Multi Variate Gaussian Distribution - """ + """Multi Variate Gaussian Distribution.""" def __init__(self, n_features, n_patches): super().__init__() @@ -40,7 +36,7 @@ def __init__(self, n_features, n_patches): def _cov( observations: Tensor, rowvar: bool = False, bias: bool = False, ddof: Optional[int] = None, aweights=None ) -> Tensor: - """Estimates covariance matrix like numpy.cov + """Estimates covariance matrix like numpy.cov. Args: observations: A 1-D or 2-D array containing multiple variables and observations. @@ -70,7 +66,6 @@ def _cov( Returns: The covariance matrix of the variables. - """ # ensure at least 2D if observations.dim() == 1: @@ -120,8 +115,7 @@ def _cov( return covariance.squeeze() def forward(self, embedding: Tensor) -> List[Tensor]: - """ - Calculate multivariate Gaussian distribution + """Calculate multivariate Gaussian distribution. Args: embedding: CNN features whose dimensionality is reduced via either random sampling or PCA. @@ -129,7 +123,6 @@ def forward(self, embedding: Tensor) -> List[Tensor]: Returns: mean and inverse covariance of the multi-variate gaussian distribution that fits the features. 
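A minimal sketch of the GaussianKDE fit/score cycle documented in this file; the feature dimensions are arbitrary:
>>> import torch
>>> from anomalib.core.model.kde import GaussianKDE
>>> features = torch.rand(100, 8)  # 100 feature vectors of dimension 8
>>> kde = GaussianKDE()
>>> kde.fit(features)
>>> estimates = kde(torch.rand(10, 8))  # density estimates for new samples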
- """ device = embedding.device @@ -147,14 +140,12 @@ def forward(self, embedding: Tensor) -> List[Tensor]: return [self.mean, self.inv_covariance] def fit(self, embedding: Tensor) -> List[Tensor]: - """ - Fit multi-variate gaussian distribution to the input embedding. + """Fit multi-variate gaussian distribution to the input embedding. Args: embedding: Tensor: Embedding vector extracted from CNN. Returns: Mean and the covariance of the embedding. - """ return self.forward(embedding) diff --git a/anomalib/core/model/pca.py b/anomalib/core/model/pca.py index b300abdc18..9edc82be03 100644 --- a/anomalib/core/model/pca.py +++ b/anomalib/core/model/pca.py @@ -1,6 +1,4 @@ -""" -Principle Component Analysis (PCA) with PyTorch -""" +"""Principle Component Analysis (PCA) with PyTorch.""" # Copyright (C) 2020 Intel Corporation # @@ -25,12 +23,11 @@ class PCA(DynamicBufferModule): - """ - Principle Component Analysis (PCA) + """Principle Component Analysis (PCA). Args: n_components (float): Number of components. Can be either integer number of components - or a ratio between 0-1. + or a ratio between 0-1. """ def __init__(self, n_components: Union[float, int]): @@ -47,8 +44,7 @@ def __init__(self, n_components: Union[float, int]): self.num_components: Tensor def fit(self, dataset: Tensor) -> None: - """ - Fits the PCA model to the dataset + """Fits the PCA model to the dataset. Args: dataset (Tensor): Input dataset to fit the model. @@ -71,13 +67,13 @@ def fit(self, dataset: Tensor) -> None: self.mean = mean def fit_transform(self, dataset: Tensor) -> Tensor: - """ + """Fit and transform PCA to dataset. Args: dataset (Tensor): Dataset to which the PCA if fit and transformed - Returns: Transformed dataset - + Returns: + Transformed dataset """ mean = dataset.mean(dim=0) dataset -= mean @@ -91,21 +87,20 @@ def fit_transform(self, dataset: Tensor) -> Tensor: return torch.matmul(dataset, self.singular_vectors) def transform(self, features: Tensor) -> Tensor: - """ - Transforms the features based on singular vectors calculated earlier. + """Transforms the features based on singular vectors calculated earlier. Args: features (Tensor): Input features - Returns: Transformed features + Returns: + Transformed features """ features -= self.mean return torch.matmul(features, self.singular_vectors) def inverse_transform(self, features: Tensor) -> Tensor: - """ - Inverses the transformed features + """Inverses the transformed features. Args: features (Tensor): Transformed features @@ -116,12 +111,12 @@ def inverse_transform(self, features: Tensor) -> Tensor: return inv_features def forward(self, features: Tensor) -> Tensor: - """ - Transforms the features + """Transforms the features. Args: features (Tensor): Input features - Returns: Transformed features + Returns: + Transformed features """ return self.transform(features) diff --git a/anomalib/core/results/__init__.py b/anomalib/core/results/__init__.py index f793d0f285..0ccb027fdc 100644 --- a/anomalib/core/results/__init__.py +++ b/anomalib/core/results/__init__.py @@ -1,8 +1,4 @@ -""" -Result -This module contains Result dataclass objects to store -classification and segmentation results. 
-""" +"""This module contains Result dataclass objects to store classification and segmentation results.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/core/results/results.py b/anomalib/core/results/results.py index 592cecff71..73fbb68911 100644 --- a/anomalib/core/results/results.py +++ b/anomalib/core/results/results.py @@ -1,6 +1,4 @@ -""" -Result Set -""" +"""This module defines Result Sets.""" # Copyright (C) 2020 Intel Corporation # @@ -28,12 +26,10 @@ @dataclass class ClassificationResults: - """ - Dataclass to store classification-task results. - A classification task would return a anomaly - classification score, which is used to compute - the overall performance by comparing it with the - true_labels (ground-truth). + """Dataclass to store classification-task results. + + A classification task would return a anomaly classification score, which is used to compute + the overall performance by comparing it with the true_labels (ground-truth). Args: filenames: List[Union[str, Path]] @@ -61,9 +57,7 @@ class ClassificationResults: performance: Dict[str, Any] = field(default_factory=dict) def store_outputs(self, outputs: List[dict]): - """ - Concatenate the outputs from the individual batches and store in the result set - """ + """Concatenate the outputs from the individual batches and store in the result set.""" if "image_path" in outputs[0].keys(): self.filenames = [Path(f) for x in outputs for f in x["image_path"]] self.images = torch.vstack([x["image"] for x in outputs]) @@ -71,9 +65,7 @@ def store_outputs(self, outputs: List[dict]): self.pred_scores = np.hstack([output["pred_scores"].cpu() for output in outputs]) def evaluate(self, threshold: float): - """ - Compute performance metrics - """ + """Compute performance metrics.""" self.pred_labels = self.pred_scores >= threshold self.performance["image_f1_score"] = f1_score(self.true_labels, self.pred_labels) self.performance["balanced_accuracy_score"] = balanced_accuracy_score(self.true_labels, self.pred_labels) @@ -82,12 +74,10 @@ def evaluate(self, threshold: float): @dataclass class SegmentationResults(ClassificationResults): - """ - Dataclass to store segmentation-based task results. - An anomaly segmentation task returns anomaly maps in - addition to anomaly scores, which are then used to - compute anomaly masks to compare against the true - segmentation masks. + """Dataclass to store segmentation-based task results. + + An anomaly segmentation task returns anomaly maps in addition to anomaly scores, which are then used to + compute anomaly masks to compare against the true segmentation masks. 
Args: anomaly_maps: List[Union[np.ndarray, Tensor]] @@ -109,16 +99,12 @@ class SegmentationResults(ClassificationResults): pred_masks: Optional[np.ndarray] = None def store_outputs(self, outputs: List[dict]): - """ - Concatenate the outputs from the individual batches and store in the result set - """ + """Concatenate the outputs from the individual batches and store in the result set.""" super().store_outputs(outputs) self.true_masks = np.vstack([output["mask"].squeeze(1).cpu() for output in outputs]) self.anomaly_maps = np.vstack([output["anomaly_maps"].cpu() for output in outputs]) def evaluate(self, threshold: float): - """ - First compute common metrics, then compute segmentation-specific metrics - """ + """First compute common metrics, then compute segmentation-specific metrics.""" super().evaluate(threshold) self.performance["pixel_roc_auc"] = roc_auc_score(self.true_masks.flatten(), self.anomaly_maps.flatten()) diff --git a/anomalib/datasets/__init__.py b/anomalib/datasets/__init__.py index 70ab14d991..5e46b3825c 100644 --- a/anomalib/datasets/__init__.py +++ b/anomalib/datasets/__init__.py @@ -1,6 +1,4 @@ -""" -Anomalib Datasets -""" +"""Anomalib Datasets.""" # Copyright (C) 2020 Intel Corporation # @@ -26,8 +24,7 @@ def get_datamodule(config: Union[DictConfig, ListConfig]): - """ - Get Anomaly Datamodule + """Get Anomaly Datamodule. Args: config: Configuration of the anomaly model Returns: PyTorch Lightning DataModule - """ datamodule: LightningDataModule diff --git a/anomalib/datasets/anomaly_dataset.py b/anomalib/datasets/anomaly_dataset.py index 4f2dfc205a..6407a5c85f 100644 --- a/anomalib/datasets/anomaly_dataset.py +++ b/anomalib/datasets/anomaly_dataset.py @@ -1,8 +1,8 @@ -""" -Anomaly Dataset -This script contains PyTorch Dataset, Dataloader and PyTorch Lightning -DataModule for the Anomaly dataset. If the dataset is not on the file -system, the script downloads and extracts the dataset from URL and create +"""Anomaly Dataset. + +This script contains PyTorch Dataset, Dataloader and PyTorch Lightning DataModule for the Anomaly dataset. + +If the dataset is not on the file system, the script downloads and extracts the dataset from the URL and creates PyTorch data objects. """ # Copyright (C) 2020 Intel Corporation # @@ -57,14 +57,12 @@ def split_normal_images_in_train_set(samples: DataFrame, split_ratio: float = 0.1, seed: int = 0) -> DataFrame: - """ - split_normal_images_in_train_set - This function splits the normal images in training set and assigns the - values to the test set. This is particularly useful especially when the - test set does not contain any normal images. - - This is important because when the test set doesn't have any normal images, - AUC computation fails due to having single class. + """This function splits the normal images in the training set and assigns the values to the test set. + + This is particularly useful when the test set does not contain any normal images. + + This is important because when the test set doesn't have any normal images, + AUC computation fails due to having a single class. Args: samples (DataFrame): Dataframe containing dataset info such as filenames, splits etc. @@ -89,9 +87,7 @@ def split_normal_images_in_train_set(samples: DataFrame, split_ratio: float = 0. def create_validation_set_from_test_set(samples: DataFrame, seed: int = 0) -> DataFrame: - """ - This function creates a validation set from test set by splitting both - normal and abnormal samples to two. 
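As a usage sketch of get_datamodule from this hunk (the model config path is a placeholder):
>>> from anomalib.config import get_configurable_parameters
>>> from anomalib.datasets import get_datamodule
>>> config = get_configurable_parameters(model_config_path="anomalib/models/padim/config.yaml")  # hypothetical path
>>> datamodule = get_datamodule(config)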
+ """This function creates a validation set from test set by splitting both normal and abnormal samples to two. Args: samples (DataFrame): Dataframe containing dataset info such as filenames, splits etc. @@ -119,9 +115,9 @@ def create_validation_set_from_test_set(samples: DataFrame, seed: int = 0) -> Da def make_dataset(path: Path, split_ratio: float = 0.1, seed: int = 0, create_validation_set: bool = False) -> DataFrame: - """ - This function creates MVTec samples by parsing the MVTec data file structure, based on the following - structure: + """Create MVTec samples by parsing the MVTec data file structure. + + The files are expected to follow the structure: path/to/dataset/split/category/image_filename.png path/to/dataset/ground_truth/category/mask_filename.png @@ -198,9 +194,7 @@ def make_dataset(path: Path, split_ratio: float = 0.1, seed: int = 0, create_val class BaseAnomalyDataset(VisionDataset): - """ - Anomaly PyTorch Dataset - """ + """Anomaly PyTorch Dataset.""" _TARGET_FILE_EXT: str _SPLIT: str @@ -234,9 +228,7 @@ def __init__( raise RuntimeError(f"Found 0 images in {self.category / self._SPLIT}") def _download(self) -> None: - """ - Download the Anomaly dataset from URL - """ + """Download the Anomaly dataset from URL.""" if (self.root / self.category).is_dir(): logger.warning("Dataset directory exists.") else: @@ -263,36 +255,40 @@ def _download(self) -> None: self._clean() def _extract(self) -> None: - """ - Extract Anomaly Dataset - """ + """Extract Anomaly Dataset.""" logger.info("Extracting Anomaly dataset") with tarfile.open(self.filename) as file: file.extractall(self.root) def _clean(self) -> None: - """ - Cleanup Anomaly Dataset tar file. - """ + """Cleanup Anomaly Dataset tar file.""" logger.info("Cleaning up the tar file") self.filename.unlink() def __len__(self) -> int: + """Return length of samples.""" return len(self.samples) @abc.abstractmethod def __getitem__(self, index: int) -> Any: + """Get item from dataset.""" raise NotImplementedError() class AnomalyTrainDS(BaseAnomalyDataset): - """ - Anomaly Training dataset - """ + """Anomaly Training dataset.""" _SPLIT = "train" def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: + """Get item from train dataset. + + Args: + index (int): Index of the item to fetch. + + Returns: + Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dictionary containing image tensor. + """ image_path = self.samples.image_path[index] image = read_image(image_path) @@ -304,13 +300,20 @@ def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[st class AnomalyTestClassificationDS(BaseAnomalyDataset): - """ - Anomaly classification - test dataset - """ + """Anomaly classification - test dataset.""" _SPLIT = "test" def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: + """Get item from train dataset. + + Args: + index (int): Index of the item to fetch. + + Returns: + Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dictionary containing image path, image tensor, + and label index. 
+ """ image_path = self.samples.image_path[index] label_index = self.samples.label_index[index] @@ -328,14 +331,21 @@ def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[st class AnomalyTestSegmentationDS(BaseAnomalyDataset): - """ - Anomaly segmentation - test dataset - """ + """Anomaly segmentation - test dataset.""" _TARGET_FILE_EXT = "_mask.png" _SPLIT = "test" def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: + """Get single instance from dataset. + + Args: + index (int): Index of the item to fetch. + + Returns: + Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict containing image path, mask path, + image tensor, label and mask tensor. + """ image_path = self.samples.image_path[index] mask_path = self.samples.target_path[index] label_index = self.samples.label_index[index] @@ -362,9 +372,7 @@ def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[st class AnomalyTestDetectionDS(BaseAnomalyDataset): - """ - Anomaly detection - test dataset - """ + """Anomaly detection - test dataset.""" _TARGET_FILE_EXT = ".xml" _SPLIT = "test" @@ -397,6 +405,15 @@ def __init__( raise ValueError(f"Unknown data annotation format: {self.label_format}!") def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: + """Get single instance from dataset. + + Args: + index (int): Index of the item to fetch. + + Returns: + Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict containing image path, target path, + image tensor, label and transformed bounding box. + """ image_path = self.samples.image_path[index] target_path = self.samples.target_path[index] label_index = self.samples.label_index[index] @@ -422,10 +439,9 @@ def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[st class AnomalyDataModule(LightningDataModule): - """ - Anomaly data Lightning Module + """Anomaly data Lightning Module. - init parameters: + Args: root: folder containing the dataset url: web link to download a dataset category: subcategory or class label @@ -477,10 +493,7 @@ def __init__( raise ValueError(f"Unknown task type: {self.task}!") def prepare_data(self): - """ - prepare_data - download training data if not available - """ + """Download training data if not available.""" # Training Data self.train_dataset( @@ -504,9 +517,7 @@ def prepare_data(self): ) def setup(self, stage: Optional[str] = None) -> None: - """ - setup: - Data preparation - split for train, test & val + """Data preparation - split for train, test & val. 
Args: stage: optional argument to specify if train or test @@ -539,9 +550,7 @@ def setup(self, stage: Optional[str] = None) -> None: ) def train_dataloader(self) -> DataLoader: - """ - Train Dataloader - """ + """Train Dataloader.""" return DataLoader( self.train_data, shuffle=False, @@ -550,9 +559,7 @@ def train_dataloader(self) -> DataLoader: ) def val_dataloader(self) -> DataLoader: - """ - Validation Dataloader - """ + """Validation Dataloader.""" return DataLoader( self.test_data, shuffle=False, @@ -561,9 +568,7 @@ def val_dataloader(self) -> DataLoader: ) def test_dataloader(self) -> DataLoader: - """ - Test Dataloader - """ + """Test Dataloader.""" return DataLoader( self.test_data, shuffle=False, diff --git a/anomalib/datasets/mvtec.py b/anomalib/datasets/mvtec.py index 7745d7f207..e42c44c228 100644 --- a/anomalib/datasets/mvtec.py +++ b/anomalib/datasets/mvtec.py @@ -1,9 +1,7 @@ -""" -MVTec -This script contains PyTorch Dataset, Dataloader and PyTorch Lightning -DataModule for the MVTec dataset. If the dataset is not on the file -system, the script downloads and extracts the dataset and create -PyTorch data objects. +"""MVTec. This script contains PyTorch Dataset, Dataloader and PyTorch Lightning DataModule for the MVTec dataset. + +If the dataset is not on the file system, the script downloads and +extracts the dataset and creates PyTorch data objects. """ # Copyright (C) 2020 Intel Corporation @@ -49,14 +47,11 @@ def split_normal_images_in_train_set(samples: DataFrame, split_ratio: float = 0.1, seed: int = 0) -> DataFrame: - """ - split_normal_images_in_train_set - This function splits the normal images in training set and assigns the - values to the test set. This is particularly useful especially when the - test set does not contain any normal images. - - This is important because when the test set doesn't have any normal images, - AUC computation fails due to having single class. + """This function splits the normal images in the training set and assigns the values to the test set. + + This is particularly useful when the test set does not contain any normal images. + This is important because when the test set doesn't have any normal images, + AUC computation fails due to having a single class. Args: samples (DataFrame): Dataframe containing dataset info such as filenames, splits etc. @@ -80,9 +75,9 @@ def split_normal_images_in_train_set(samples: DataFrame, split_ratio: float = 0. def make_mvtec_dataset(path: Path, split: str = "train", split_ratio: float = 0.1, seed: int = 0) -> DataFrame: - """ - This function creates MVTec samples by parsing the MVTec data file structure, based on the following - structure: + """Create MVTec samples by parsing the MVTec data file structure. + + The files are expected to follow the structure: path/to/dataset/split/category/image_filename.png path/to/dataset/ground_truth/category/mask_filename.png @@ -164,12 +159,10 @@ def make_mvtec_dataset(path: Path, split: str = "train", split_ratio: float = 0. def get_image_transforms(image_size: Union[Sequence, int], crop_size: Union[Sequence, int]) -> T.Compose: - """ - Get default ImageNet image transformations. + """Get default ImageNet image transformations. Returns: T.Compose: List of imagenet transformations. 
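For reference, the transform builder above is usable standalone; crop_size may be None, in which case image_size is reused:
>>> from anomalib.datasets.mvtec import get_image_transforms
>>> transforms = get_image_transforms(image_size=256, crop_size=224)  # returns a torchvision T.Compose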
- """ crop_size = image_size if crop_size is None else crop_size transform = T.Compose( @@ -184,12 +177,10 @@ def get_image_transforms(image_size: Union[Sequence, int], crop_size: Union[Sequ def get_mask_transforms(image_size: Union[Sequence, int], crop_size: Union[Sequence, int]) -> T.Compose: - """ - Get default ImageNet transformations for the ground-truth image masks. + """Get default ImageNet transformations for the ground-truth image masks. Returns: T.Compose: List of imagenet transformations. - """ crop_size = image_size if crop_size is None else crop_size transform = Compose( @@ -203,9 +194,7 @@ def get_mask_transforms(image_size: Union[Sequence, int], crop_size: Union[Seque class MVTec(VisionDataset): - """ - MVTec PyTorch Dataset - """ + """MVTec PyTorch Dataset.""" def __init__( self, @@ -230,9 +219,7 @@ def __init__( self.samples = make_mvtec_dataset(path=self.root / category, split=self.split) def _download(self) -> None: - """ - Download the MVTec dataset - """ + """Download the MVTec dataset.""" if (self.root / self.category).is_dir(): logger.warning("Dataset directory exists.") else: @@ -252,24 +239,30 @@ def _download(self) -> None: self._clean() def _extract(self) -> None: - """ - Extract MVTec Dataset - """ + """Extract MVTec Dataset.""" logger.info("Extracting MVTec dataset") with tarfile.open(self.filename) as file: file.extractall(self.root) def _clean(self) -> None: - """ - Cleanup MVTec Dataset tar file. - """ + """Cleanup MVTec Dataset tar file.""" logger.info("Cleaning up the tar file") self.filename.unlink() def __len__(self) -> int: + """Return length of dataset.""" return len(self.samples) def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: + """Get single instance from dataset. + + Args: + index (int): Index of the item to fetch. + + Returns: + Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict of image tensor during training. + Otherwise, Dict containing image path, target path, image tensor, label and transformed bounding box. + """ image_path = self.samples.image_path[index] mask_path = self.samples.mask_path[index] label_index = self.samples.label_index[index] @@ -299,9 +292,7 @@ def __getitem__(self, index: int) -> Union[Dict[str, Tensor], Dict[str, Union[st class MVTecDataModule(LightningDataModule): - """ - MVTec Lightning Data Module - """ + """MVTec Lightning Data Module.""" def __init__( self, @@ -339,9 +330,7 @@ def __init__( self.val_data: Dataset def prepare_data(self): - """ - Prepare MVTec Dataset - """ + """Prepare MVTec Dataset.""" # Train MVTec( @@ -364,12 +353,10 @@ def prepare_data(self): ) def setup(self, stage: Optional[str] = None) -> None: - """ - Setup train, validation and test data. + """Setup train, validation and test data. 
Args: stage: Optional[str]: (Default value = None) - """ self.val_data = MVTec( root=self.root, @@ -388,15 +375,15 @@ def setup(self, stage: Optional[str] = None) -> None: ) def train_dataloader(self) -> DataLoader: - """Get train dataloader""" + """Get train dataloader.""" return DataLoader( self.train_data, shuffle=False, batch_size=self.train_batch_size, num_workers=self.num_workers ) def val_dataloader(self) -> DataLoader: - """Get validation dataloader""" + """Get validation dataloader.""" return DataLoader(self.val_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers) def test_dataloader(self) -> DataLoader: - """Get test dataloader""" + """Get test dataloader.""" return DataLoader(self.val_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers) diff --git a/anomalib/datasets/parser.py b/anomalib/datasets/parser.py index 201ea59537..13233794c7 100644 --- a/anomalib/datasets/parser.py +++ b/anomalib/datasets/parser.py @@ -1,6 +1,6 @@ -""" -This script contains parsers for different annotations for object detection task. - Parsers include pascal-voc. +"""This script contains parsers for different annotations for object detection task. + +Parsers include pascal-voc. """ # Copyright (C) 2020 Intel Corporation @@ -27,8 +27,7 @@ class PascalVocReader: - """ - Data parser for Pascal-VOC labels + """Data parser for Pascal-VOC labels. Args: file_path (str): Path to XML file @@ -45,7 +44,8 @@ def __init__(self, file_path: str): self.parse_xml() def get_shapes(self) -> Dict[str, Union[List, Any]]: - """ + """Return bounding boxes and associated labels. + Returns: annotated bounding boxes and corresponding labels """ @@ -53,10 +53,11 @@ def get_shapes(self) -> Dict[str, Union[List, Any]]: return {"boxes": self.boxes, "labels": self.labels} def add_shape(self, label: str, bnd_box: ElementTree.Element): - """ + """Extract bounding box from the xml element and store it in a list. + Args: - label: label for target object - bnd_box: bounding box coordinates + label (str): label for target object + bnd_box (ElementTree): bounding box coordinates """ _x_min, _y_min, _x_max, _y_max = ( bnd_box.find("xmin"), @@ -74,9 +75,7 @@ def add_shape(self, label: str, bnd_box: ElementTree.Element): self.labels.append(label) def parse_xml(self): - """ - Function to read xml file and parse annotations - """ + """Function to read xml file and parse annotations.""" if self.file_path.endswith(self._xml_ext): parser = XMLParser(encoding=self._encode_method) diff --git a/anomalib/datasets/tiler.py b/anomalib/datasets/tiler.py index d737bf8db7..e3850a2326 100644 --- a/anomalib/datasets/tiler.py +++ b/anomalib/datasets/tiler.py @@ -1,6 +1,4 @@ -""" -Image Tiler -""" +"""Image Tiler.""" # Copyright (C) 2020 Intel Corporation # @@ -27,15 +25,12 @@ class StrideSizeError(Exception): - """ - StrideSizeError to raise exception when stride - size is greater than the tile size. - """ + """StrideSizeError to raise exception when stride size is greater than the tile size.""" def compute_new_image_size(image_size: Tuple, tile_size: Tuple, stride: Tuple) -> Tuple: - """ - This function checks if image size is divisible by tile size and stride. + """This function checks if image size is divisible by tile size and stride. + If not divisible, it resizes the image size to make it divisible. 
Args:
@@ -55,9 +50,7 @@ def compute_new_image_size(image_size: Tuple, tile_size: Tuple, stride: Tuple) -
     """

     def __compute_new_edge_size(edge_size: int, tile_size: int, stride: int) -> int:
-        """
-        This function makes the resizing within the edge level.
-        """
+        """This function makes the resizing within the edge level."""
         if (edge_size - tile_size) % stride != 0:
             edge_size = (ceil((edge_size - tile_size) / stride) * stride) + tile_size

@@ -70,8 +63,7 @@ def __compute_new_edge_size(edge_size: int, tile_size: int, stride: int) -> int:


 def upscale_image(image: Tensor, size: Tuple, mode: str = "padding") -> Tensor:
-    """
-    Upscale image to the desired size via either padding or interpolation.
+    """Upscale image to the desired size via either padding or interpolation.

     Args:
         image (Tensor): Image
@@ -110,8 +102,7 @@ def upscale_image(image: Tensor, size: Tuple, mode: str = "padding") -> Tensor:


 def downscale_image(image: Tensor, size: Tuple, mode: str = "padding") -> Tensor:
-    """
-    Opposite of upscaling. This image downscales image to a desired size.
+    """Opposite of upscaling. This function downscales the image to a desired size.

     Args:
         image (Tensor): Input image
@@ -138,8 +129,7 @@ def downscale_image(image: Tensor, size: Tuple, mode: str = "padding") -> Tensor


 class Tiler:
-    """
-    Tile Image into (non)overlapping Patches. Images are tiled inorder to efficiently process large images.
+    """Tile Image into (non)overlapping Patches. Images are tiled in order to efficiently process large images.

     Args:
         tile_size: Tile dimension for each patch
@@ -220,26 +210,24 @@ def __validate_size_type(parameter) -> Tuple:
         return output

     def __random_tile(self, image: Tensor) -> Tensor:
-        """
-        Randomly crop tiles from the given image
+        """Randomly crop tiles from the given image.

         Args:
             image: input image to be cropped

         Returns: Randomly cropped tiles from the image
-
         """
         return torch.vstack([T.RandomCrop(self.tile_size_h)(image) for i in range(self.tile_count)])

     def __unfold(self, tensor: Tensor) -> Tensor:
-        """
-        Unfolds tensor into tiles. This is the core function to perform tiling operation.
+        """Unfolds tensor into tiles.
+
+        This is the core function to perform tiling operation.

         Args:
             tensor: Input tensor from which tiles are generated.

         Returns: Generated tiles
-
         """

         # identify device type based on input tensor
@@ -275,16 +263,15 @@ def __unfold(self, tensor: Tensor) -> Tensor:
         return tiles

     def __fold(self, tiles: Tensor) -> Tensor:
-        """
-        Fold the tiles back into the original tensor. This is the core method to reconstruct
-        the original image from its tiled version.
+        """Fold the tiles back into the original tensor.
+
+        This is the core method to reconstruct the original image from its tiled version.

         Args:
             tiles: Tiles from the input image, generated via __unfold method.

         Returns:
             Output that is the reconstructed version of the input tensor.
-
         """
         # number of channels differs between image and anomaly map, so infer from input tiles.
         _, num_channels, tile_size_h, tile_size_w = tiles.shape
@@ -352,8 +339,7 @@ def __fold(self, tiles: Tensor) -> Tensor:
         return img

     def tile(self, image: Tensor, use_random_tiling: Optional[bool] = False) -> Tensor:
-        """
-        Tiles an input image to either overlapping, non-overlapping or random patches.
+        """Tiles an input image to either overlapping, non-overlapping or random patches.

         Args:
             image: Input image to tile.
@@ -370,7 +356,6 @@ def tile(self, image: Tensor, use_random_tiling: Optional[bool] = False) -> Tens

         Returns:
             Tiles generated from the image.
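To make the resize arithmetic of `compute_new_image_size` and `upscale_image` above concrete, a small sketch; it assumes the size arguments are `(height, width)` tuples and that the return value is the resized `(height, width)`:

>>> import torch
>>> from anomalib.datasets.tiler import compute_new_image_size, upscale_image
>>> compute_new_image_size(image_size=(512, 512), tile_size=(256, 256), stride=(180, 180))
(616, 616)
>>> # per edge: (512 - 256) % 180 != 0, so ceil((512 - 256) / 180) * 180 + 256 = 616
>>> image = torch.rand(1, 3, 512, 512)
>>> upscale_image(image, size=(616, 616), mode="padding").shape
torch.Size([1, 3, 616, 616])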
- """ if image.dim() == 3: image = image.unsqueeze(0) @@ -398,16 +383,15 @@ def tile(self, image: Tensor, use_random_tiling: Optional[bool] = False) -> Tens return image_tiles def untile(self, tiles: Tensor) -> Tensor: - """ - Untiles patches to reconstruct the original input image. If patches, are overlapping - patches, the function averages the overlapping pixels, and return the reconstructed - image. + """Untiles patches to reconstruct the original input image. + + If patches, are overlapping patches, the function averages the overlapping pixels, + and return the reconstructed image. Args: tiles: Tiles from the input image, generated via tile().. Examples: - >>> from anomalib.datasets.tiler import Tiler >>> tiler = Tiler(tile_size=512,stride=256) >>> image = torch.rand(size=(2, 3, 1024, 1024)) @@ -424,7 +408,6 @@ def untile(self, tiles: Tensor) -> Tensor: Returns: Output that is the reconstructed version of the input tensor. - """ image = self.__fold(tiles) image = downscale_image(image=image, size=(self.input_h, self.input_w), mode=self.mode) diff --git a/anomalib/datasets/transforms/__init__.py b/anomalib/datasets/transforms/__init__.py index 03e11e37a2..5c648d01e7 100644 --- a/anomalib/datasets/transforms/__init__.py +++ b/anomalib/datasets/transforms/__init__.py @@ -1,6 +1,4 @@ -""" -Anomalib Data Transforms -""" +"""Anomalib Data Transforms.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/datasets/transforms/pre_process.py b/anomalib/datasets/transforms/pre_process.py index 12c7349822..775325504e 100644 --- a/anomalib/datasets/transforms/pre_process.py +++ b/anomalib/datasets/transforms/pre_process.py @@ -1,8 +1,4 @@ -""" -Pre Process -This module contains `PreProcessor` class that applies preprocessing -to an input image before the forward-pass stage. -""" +"""`PreProcessor` class that applies preprocessing to an input image before the forward-pass stage.""" # Copyright (C) 2020 Intel Corporation # @@ -25,12 +21,11 @@ class PreProcessor: - """ - PreProcessor class applies pre-processing and data augmentations - to the input and returns the transformed output, which could be - either numpy ndarray or torch tensor. When `PreProcessor` class is - used for training, the output would be `torch.Tensor`. For the inference - it returns a numpy array + """Applies pre-processing and data augmentations to the input and returns the transformed output. + + Output could be either numpy ndarray or torch tensor. + When `PreProcessor` class is used for training, the output would be `torch.Tensor`. + For the inference it returns a numpy array. Args: config (Optional[Union[str, A.Compose]], optional): Transformation configurations. @@ -57,24 +52,23 @@ class PreProcessor: Transforms could be read from albumentations Compose object. - >>> import albumentations as A - >>> from albumentations.pytorch import ToTensorV2 - >>> config = A.Compose([A.Resize(512, 512), ToTensorV2()]) - >>> pre_processor = PreProcessor(config=config, to_tensor=False) - >>> output = pre_processor(image=image) - >>> output["image"].shape - (512, 512, 3) - >>> type(output["image"]) - numpy.ndarray + >>> import albumentations as A + >>> from albumentations.pytorch import ToTensorV2 + >>> config = A.Compose([A.Resize(512, 512), ToTensorV2()]) + >>> pre_processor = PreProcessor(config=config, to_tensor=False) + >>> output = pre_processor(image=image) + >>> output["image"].shape + (512, 512, 3) + >>> type(output["image"]) + numpy.ndarray Transforms could be deserialized from a yaml file. 
- >>> transforms = A.Compose([A.Resize(1024, 1024), ToTensorV2()]) - >>> A.save(transforms, "/tmp/transforms.yaml", data_format="yaml") - - >>> pre_processor = PreProcessor(config="/tmp/transforms.yaml") - >>> output = pre_processor(image=image) - >>> output["image"].shape - torch.Size([3, 1024, 1024]) + >>> transforms = A.Compose([A.Resize(1024, 1024), ToTensorV2()]) + >>> A.save(transforms, "/tmp/transforms.yaml", data_format="yaml") + >>> pre_processor = PreProcessor(config="/tmp/transforms.yaml") + >>> output = pre_processor(image=image) + >>> output["image"].shape + torch.Size([3, 1024, 1024]) """ def __init__( @@ -90,8 +84,7 @@ def __init__( self.transforms = self.get_transforms() def get_transforms(self) -> A.Compose: - """ - Get transforms from config or image size + """Get transforms from config or image size. Returns: A.Compose: List of albumentation transformations to apply to the @@ -138,4 +131,5 @@ def get_transforms(self) -> A.Compose: return transforms def __call__(self, *args, **kwargs): + """Return transformed arguments.""" return self.transforms(*args, **kwargs) diff --git a/anomalib/datasets/utils.py b/anomalib/datasets/utils.py index 218b7ac554..927dec5eac 100644 --- a/anomalib/datasets/utils.py +++ b/anomalib/datasets/utils.py @@ -1,6 +1,4 @@ -""" -Dataset Utils -""" +"""Dataset Utils.""" # Copyright (C) 2020 Intel Corporation # @@ -24,11 +22,10 @@ def read_image(path: str) -> np.ndarray: - """ - read_image - reads image from disk in RGB format + """Read image from disk in RGB format. + Args: - path: path to the image file + path (str): path to the image file Returns: image as numpy array @@ -40,8 +37,11 @@ def read_image(path: str) -> np.ndarray: class Denormalize: - """ - Denormalize Torch Tensor into np image format. + """Denormalize Torch Tensor into np image format. + + Args: + mean (Optional[List[float]], optional): Mean used for denormalizing. Defaults to None. + std (Optional[List[float]], optional): Standard deviation used for denormalizing. Defaults to None. """ def __init__(self, mean: Optional[List[float]] = None, std: Optional[List[float]] = None): @@ -56,15 +56,13 @@ def __init__(self, mean: Optional[List[float]] = None, std: Optional[List[float] self.std = Tensor(std) def __call__(self, tensor: Tensor) -> np.ndarray: - """ - Denormalize the input + """Denormalize the input. Args: - tensor: Input tensor image (C, H, W) + tensor (Tensor): Input tensor image (C, H, W) Returns: Denormalized numpy array (H, W, C). - """ if tensor.dim() == 4: @@ -79,17 +77,29 @@ def __call__(self, tensor: Tensor) -> np.ndarray: array = (tensor * 255).permute(1, 2, 0).cpu().numpy().astype(np.uint8) return array - def __repr__(self): + def __repr__(self) -> str: + """Prints `Denormalize()`. + + Returns: + (str): Return string with class name + """ return self.__class__.__name__ + "()" class ToNumpy: - """ - Convert Tensor into Numpy Array - """ + """Convert Tensor into Numpy Array.""" def __call__(self, tensor: Tensor, dims: Optional[Tuple[int, ...]] = None) -> np.ndarray: + """Convert torch tensor to numpy. + Args: + tensor (Tensor): Input tensor in range 0-1 + dims (Optional[Tuple[int, ...]], optional): Tuple corresponding to axis permutation from torch tensor to + numpy array. Defaults to None. 
+
+        Returns:
+            np.ndarray: Converted numpy array in range 0-255
+        """
         # Default support is (C, H, W) or (N, C, H, W)
         if dims is None:
             dims = (0, 2, 3, 1) if len(tensor.shape) == 4 else (1, 2, 0)
@@ -103,5 +113,10 @@ def __call__(self, tensor: Tensor, dims: Optional[Tuple[int, ...]] = None) -> np

         return array

-    def __repr__(self):
+    def __repr__(self) -> str:
+        """Return `ToNumpy()`.
+
+        Returns:
+            str: Class name
+        """
         return self.__class__.__name__ + "()"
diff --git a/anomalib/loggers/__init__.py b/anomalib/loggers/__init__.py
index 2519b7d86c..d38495796c 100644
--- a/anomalib/loggers/__init__.py
+++ b/anomalib/loggers/__init__.py
@@ -1,6 +1,4 @@
-"""
-Load PyTorch Lightning Loggers.
-"""
+"""Load PyTorch Lightning Loggers."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -31,14 +29,11 @@


 class UnknownLogger(Exception):
-    """
-    This is raised when the logger option in config.yaml file is set incorrectly.
-    """
+    """This is raised when the logger option in config.yaml file is set incorrectly."""


 def get_logger(config: Union[DictConfig, ListConfig]) -> Union[LightningLoggerBase, bool]:
-    """
-    Return a logger based on the choice of logger in the config file.
+    """Return a logger based on the choice of logger in the config file.

     Args:
         config (DictConfig): config.yaml file for the corresponding anomalib model.
@@ -49,7 +44,6 @@ def get_logger(config: Union[DictConfig, ListConfig]) -> Union[LightningLoggerBa
     Returns:
         Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool]: Logger
     """
-
     logger: Union[LightningLoggerBase, bool]

     if config.project.logger in [None, False]:
diff --git a/anomalib/loggers/base.py b/anomalib/loggers/base.py
index 074f171996..c515e374ba 100644
--- a/anomalib/loggers/base.py
+++ b/anomalib/loggers/base.py
@@ -1,4 +1,4 @@
-"""Base logger for image logging consistency across all loggers used in anomalib"""
+"""Base logger for image logging consistency across all loggers used in anomalib."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -22,9 +22,9 @@


 class ImageLoggerBase:
-    """Adds a common interface for logging the images"""
+    """Adds a common interface for logging the images."""

     @abstractmethod
     def add_image(self, image: Union[np.ndarray, Figure], name: Optional[str] = None, **kwargs: Any) -> None:
-        """Interface to log images in the respective loggers"""
+        """Interface to log images in the respective loggers."""
         raise NotImplementedError()
diff --git a/anomalib/loggers/tensorboard.py b/anomalib/loggers/tensorboard.py
index 481594f588..3d829fbb6c 100644
--- a/anomalib/loggers/tensorboard.py
+++ b/anomalib/loggers/tensorboard.py
@@ -1,6 +1,4 @@
-"""
-tensorboard logger with add image interface
-"""
+"""Tensorboard logger with add image interface."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -27,7 +25,8 @@


 class AnomalibTensorBoardLogger(ImageLoggerBase, TensorBoardLogger):
-    """Logger for tensorboard
+    """Logger for tensorboard.
+
     Adds interface for `add_image` in the logger rather than calling the
     experiment object. The rest is the same as the Tensorboard Logger provided
     by PyTorch Lightning and the doc string for which is reproduced below

     preinstalled.
Example: - - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.loggers import TensorBoardLogger - >>> logger = TensorBoardLogger("tb_logs", name="my_model") - >>> trainer = Trainer(logger=logger) + >>> from pytorch_lightning import Trainer + >>> from pytorch_lightning.loggers import TensorBoardLogger + >>> logger = TensorBoardLogger("tb_logs", name="my_model") + >>> trainer = Trainer(logger=logger) Args: save_dir: Save directory @@ -58,7 +56,6 @@ class AnomalibTensorBoardLogger(ImageLoggerBase, TensorBoardLogger): prefix: A string to put at the beginning of metric keys. **kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by :class:`SummaryWriter` can be passed as keyword arguments in this logger. - """ def __init__( @@ -83,12 +80,12 @@ def __init__( @rank_zero_only def add_image(self, image: Union[np.ndarray, Figure], name: Optional[str] = None, **kwargs: Any): - """Interface to add image to tensorboard logger + """Interface to add image to tensorboard logger. Args: - image (np.ndarray): Image to log - name Optional (str): The tag of the image - global_step (int): The step at which to log the image + image (Union[np.ndarray, Figure]): Image to log + name (Optional[str]): The tag of the image + kwargs: Accepts only `global_step` (int). The step at which to log the image. """ if "global_step" not in kwargs: raise ValueError("`global_step` is required for tensorboard logger") diff --git a/anomalib/models/__init__.py b/anomalib/models/__init__.py index ec2edc96e8..0b01bb8c3a 100644 --- a/anomalib/models/__init__.py +++ b/anomalib/models/__init__.py @@ -1,6 +1,4 @@ -""" -Load Anomaly Model -""" +"""Load Anomaly Model.""" # Copyright (C) 2020 Intel Corporation # @@ -28,6 +26,7 @@ def get_model(config: Union[DictConfig, ListConfig]) -> AnomalyModule: """Load model from the configuration file. + Works only when the convention for model naming is followed. The convention for writing model classes is diff --git a/anomalib/models/dfkde/__init__.py b/anomalib/models/dfkde/__init__.py index a55ebe54f9..1479c15b3a 100644 --- a/anomalib/models/dfkde/__init__.py +++ b/anomalib/models/dfkde/__init__.py @@ -1,6 +1,4 @@ -""" -Deep Feature Kernel Density Estimation model -""" +"""Deep Feature Kernel Density Estimation model.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/models/dfkde/model.py b/anomalib/models/dfkde/model.py index a011d40366..fbde6e5961 100644 --- a/anomalib/models/dfkde/model.py +++ b/anomalib/models/dfkde/model.py @@ -1,6 +1,4 @@ -""" -DFKDE: Deep Feature Kernel Density Estimation -""" +"""DFKDE: Deep Feature Kernel Density Estimation.""" # Copyright (C) 2020 Intel Corporation # @@ -29,9 +27,7 @@ class DfkdeLightning(AnomalyModule): - """ - DFKDE: Deep Featured Kernel Density Estimation - """ + """DFKDE: Deep Featured Kernel Density Estimation.""" def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__(hparams) @@ -49,14 +45,11 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): @staticmethod def configure_optimizers(): - """ - DFKDE doesn't require optimization, therefore returns no optimizers. - """ + """DFKDE doesn't require optimization, therefore returns no optimizers.""" return None def training_step(self, batch, _): # pylint: disable=arguments-differ - """Training Step of DFKDE. - For each batch, features are extracted from the CNN. + """Training Step of DFKDE. For each batch, features are extracted from the CNN. 
Args: batch: Input batch @@ -64,7 +57,6 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ Returns: Deep CNN features. - """ self.feature_extractor.eval() @@ -80,7 +72,7 @@ def training_epoch_end(self, outputs: List[Dict[str, Any]]) -> None: outputs: dict: Returns: - + None """ feature_stack = torch.vstack([output["feature_vector"] for output in outputs]) @@ -88,8 +80,8 @@ def training_epoch_end(self, outputs: List[Dict[str, Any]]) -> None: def validation_step(self, batch, _): # pylint: disable=arguments-differ """Validation Step of DFKDE. - Similar to the training step, features - are extracted from the CNN for each batch. + + Similar to the training step, features are extracted from the CNN for each batch. Args: batch: Input batch @@ -97,7 +89,6 @@ def validation_step(self, batch, _): # pylint: disable=arguments-differ Returns: Dictionary containing probability, prediction and ground truth values. - """ self.feature_extractor.eval() diff --git a/anomalib/models/dfkde/normality_model.py b/anomalib/models/dfkde/normality_model.py index 9572fe8fff..30f8e6754f 100644 --- a/anomalib/models/dfkde/normality_model.py +++ b/anomalib/models/dfkde/normality_model.py @@ -1,6 +1,4 @@ -""" -Normality model of DFKDE -""" +"""Normality model of DFKDE.""" # Copyright (C) 2020 Intel Corporation # @@ -27,9 +25,7 @@ class NormalityModel(nn.Module): - """ - Normality Model for the DFKDE algorithm - """ + """Normality Model for the DFKDE algorithm.""" def __init__( self, @@ -53,8 +49,7 @@ def __init__( self.max_length = torch.Tensor(torch.Size([])) def fit(self, dataset: torch.Tensor): - """ - Fit a kde model to dataset + """Fit a kde model to dataset. Args: dataset: Input dataset to fit the model. @@ -62,7 +57,6 @@ def fit(self, dataset: torch.Tensor): Returns: Boolean confirming whether the training is successful. - """ if dataset.shape[0] < self.n_components: @@ -96,7 +90,7 @@ def preprocess( max_length: Optional[Tensor]: (Default value = None) Returns: - + (Tuple): Stacked features and length """ if max_length is None: @@ -111,23 +105,23 @@ def preprocess( return feature_stack, max_length def evaluate( - self, sem_feats: torch.Tensor, as_density: Optional[bool] = False, as_log_likelihood: Optional[bool] = False + self, features: torch.Tensor, as_density: Optional[bool] = False, as_log_likelihood: Optional[bool] = False ) -> torch.Tensor: - """ - Compute the KDE scores + # TODO + """Compute the KDE scores. Args: - sem_feats: - as_density: - as_log_likelihood: + features (torch.Tensor): Features + as_density (Optional[bool], optional): [description]. Defaults to False. + as_log_likelihood (Optional[bool], optional): [description]. Defaults to False. Returns: - + torch.Tensor: Score """ - sem_feats = self.pca_model.transform(sem_feats) - sem_feats, _ = self.preprocess(sem_feats, self.max_length) - kde_scores = self.kde_model(sem_feats) + features = self.pca_model.transform(features) + features, _ = self.preprocess(features, self.max_length) + kde_scores = self.kde_model(features) # add small constant to avoid zero division in log computation kde_scores += 1e-300 @@ -143,12 +137,10 @@ def predict(self, features: torch.Tensor) -> torch.Tensor: """Predicts the probability that the features belong to the anomalous class. Args: - features: Feature from which the output probabilities are detected. - features: torch.Tensor: + features (torch.Tensor): Feature from which the output probabilities are detected. 
Returns:
             Detection probabilities
-
         """
         densities = self.evaluate(features, as_density=True, as_log_likelihood=True)
@@ -157,8 +149,7 @@ def predict(self, features: torch.Tensor) -> torch.Tensor:
         return probabilities

     def to_probability(self, densities: torch.Tensor) -> torch.Tensor:
-        """Converts density scores to anomaly probabilities
-        (see https://www.desmos.com/calculator/ifju7eesg7)
+        """Converts density scores to anomaly probabilities (see https://www.desmos.com/calculator/ifju7eesg7).

         Args:
             densities: density of an image
@@ -166,13 +157,10 @@

         Returns:
             probability that image with {density} is anomalous
-
         """
         return 1 / (1 + torch.exp(self.threshold_steepness * (densities - self.threshold_offset)))

     def forward(self, features: torch.Tensor) -> torch.Tensor:
-        """
-        Make module callable
-        """
+        """Make module callable."""
         return self.predict(features)
diff --git a/anomalib/models/dfm/__init__.py b/anomalib/models/dfm/__init__.py
index bdf3494072..3824007791 100644
--- a/anomalib/models/dfm/__init__.py
+++ b/anomalib/models/dfm/__init__.py
@@ -1,6 +1,4 @@
-"""
-Deep Feature Extraction (DFM) model
-"""
+"""Deep Feature Extraction (DFM) model."""

 # Copyright (C) 2020 Intel Corporation
 #
diff --git a/anomalib/models/dfm/dfm_model.py b/anomalib/models/dfm/dfm_model.py
index a62d3e227b..038c9237f7 100644
--- a/anomalib/models/dfm/dfm_model.py
+++ b/anomalib/models/dfm/dfm_model.py
@@ -1,6 +1,4 @@
-"""
-Normality model of DFKDE
-"""
+"""Normality model of DFM."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -26,9 +24,7 @@


 class SingleClassGaussian(DynamicBufferModule):
-    """
-    Model Gaussian distribution over a set of points
-    """
+    """Model Gaussian distribution over a set of points."""

     def __init__(self):
         super().__init__()
@@ -41,8 +37,8 @@ def __init__(self):
         self.sigma_mat: Tensor

     def fit(self, dataset: Tensor) -> None:
-        """
-        Fit a Gaussian model to dataset X.
+        """Fit a Gaussian model to dataset X.
+
         Covariance matrix is not calculated directly using:
             C = X.X^T
         Instead, it is represented in terms of the Singular Value Decomposition of X:
@@ -61,24 +57,22 @@ def fit(self, dataset: Tensor) -> None:
         self.u_mat, self.sigma_mat, _ = torch.linalg.svd(data_centered, full_matrices=False)

     def score_samples(self, features: Tensor) -> Tensor:
-        """
-        Compute the NLL (negative log likelihood) scores
+        """Compute the NLL (negative log likelihood) scores.

         Args:
             features (Tensor): semantic features on which density modeling is performed.

         Returns:
             nll (Tensor): Torch tensor of scores
-
         """
         features_transformed = torch.matmul(features - self.mean_vec, self.u_mat / self.sigma_mat)
         nll = torch.sum(features_transformed * features_transformed, dim=1) + 2 * torch.sum(torch.log(self.sigma_mat))
         return nll

     def forward(self, dataset: Tensor) -> None:
-        """
-        Provides the same functionality as `fit`. Transforms the input dataset based on singular values calculated
-        earlier.
+        """Provides the same functionality as `fit`.
+
+        Transforms the input dataset based on singular values calculated earlier.

         Args:
             dataset (Tensor): Input dataset
@@ -87,8 +81,7 @@


 class DFMModel(nn.Module):
-    """
-    Model for the DFM algorithm
+    """Model for the DFM algorithm.

     Args:
         n_comps (float, optional): Ratio from which number of components for PCA are calculated. Defaults to 0.97.
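A minimal fit/score sketch for `DFMModel`; the feature shapes (one vector per row) are an assumption for illustration, not something the patch asserts:

>>> import torch
>>> from anomalib.models.dfm.dfm_model import DFMModel
>>> model = DFMModel(n_comps=0.97, score_type="fre")
>>> train_features = torch.rand(200, 512)  # e.g. pooled CNN features; shape assumed
>>> model.fit(train_features)
>>> scores = model.score(torch.rand(8, 512))  # higher FRE means more anomalous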
@@ -103,8 +96,7 @@ def __init__(self, n_comps: float = 0.97, score_type: str = "fre"):
         self.score_type = score_type

     def fit(self, dataset: Tensor) -> None:
-        """
-        Fit a pca transformation and a Gaussian model to dataset
+        """Fit a pca transformation and a Gaussian model to dataset.

         Args:
             dataset (Tensor): Input dataset to fit the model.
@@ -115,8 +107,9 @@ def fit(self, dataset: Tensor) -> None:
         self.gaussian_model.fit(features_reduced.T)

     def score(self, features: Tensor) -> Tensor:
-        """
-        Compute the PCA-based feature reconstruction error (FRE) scores and
+        """Compute scores.
+
+        Scores are either PCA-based feature reconstruction error (FRE) scores or
         the Gaussian density-based NLL scores

         Args:
             features (torch.Tensor): semantic features on which PCA and density modeling is performed.

         Returns:
             score (Tensor): numpy array of scores
-
         """
         feats_projected = self.pca_model.transform(features)
         if self.score_type == "nll":
@@ -138,9 +130,9 @@ def score(self, features: Tensor) -> Tensor:
         return score

     def forward(self, dataset: Tensor) -> None:
-        """
-        Provides the same functionality as `fit`. Transforms the input dataset based on singular values calculated
-        earlier.
+        """Provides the same functionality as `fit`.
+
+        Transforms the input dataset based on singular values calculated earlier.

         Args:
             dataset (Tensor): Input dataset
diff --git a/anomalib/models/dfm/model.py b/anomalib/models/dfm/model.py
index c3370232e6..a5a18e7767 100644
--- a/anomalib/models/dfm/model.py
+++ b/anomalib/models/dfm/model.py
@@ -1,6 +1,4 @@
-"""
-DFM: Deep Feature Kernel Density Estimation
-"""
+"""DFM: Deep Feature Extraction model."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -28,9 +26,7 @@


 class DfmLightning(AnomalyModule):
-    """
-    DFM: Deep Featured Kernel Density Estimation
-    """
+    """DFM: Deep Feature Extraction model."""

     def __init__(self, hparams: Union[DictConfig, ListConfig]):
         super().__init__(hparams)
@@ -40,14 +36,13 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
         self.automatic_optimization = False

     @staticmethod
-    def configure_optimizers():
-        """
-        DFM doesn't require optimization, therefore returns no optimizers.
-        """
+    def configure_optimizers() -> None:
+        """DFM doesn't require optimization, therefore it returns no optimizers."""
         return None

     def training_step(self, batch, _):  # pylint: disable=arguments-differ
         """Training Step of DFM.
+
         For each batch, features are extracted from the CNN.

         Args:
             batch: Input batch
@@ -56,7 +51,6 @@

         Returns:
             Deep CNN features.
-
         """
         self.feature_extractor.eval()
@@ -72,7 +66,7 @@ def training_epoch_end(self, outputs: List[Dict[str, Any]]) -> None:
             outputs: dict:

         Returns:
-
+            None
         """
         feature_stack = torch.vstack([output["feature_vector"] for output in outputs])
@@ -80,8 +74,8 @@ def training_epoch_end(self, outputs: List[Dict[str, Any]]) -> None:

     def validation_step(self, batch, _):  # pylint: disable=arguments-differ
         """Validation Step of DFM.
-        Similar to the training step, features
-        are extracted from the CNN for each batch.
+
+        Similar to the training step, features are extracted from the CNN for each batch.

         Args:
             batch: Dict: Input batch
             _: Index of the batch.

         Returns:
             Dictionary containing FRE anomaly scores and ground-truth.
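For orientation, a minimal (hypothetical) training wiring for the Lightning module above; `hparams` (an OmegaConf config) and `datamodule` (e.g. the MVTecDataModule shown earlier) are assumed to be prepared elsewhere and are not part of the patch:

>>> from pytorch_lightning import Trainer
>>> from anomalib.models.dfm.model import DfmLightning
>>> model = DfmLightning(hparams)              # hparams: DictConfig, assumed prepared
>>> trainer = Trainer(max_epochs=1)            # illustrative flags only
>>> trainer.fit(model, datamodule=datamodule)
>>> trainer.test(model, datamodule=datamodule)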
- """ self.feature_extractor.eval() diff --git a/anomalib/models/padim/__init__.py b/anomalib/models/padim/__init__.py index ec58034110..d85459be9e 100644 --- a/anomalib/models/padim/__init__.py +++ b/anomalib/models/padim/__init__.py @@ -1,6 +1,4 @@ -""" -PADIM model -""" +"""PADIM model.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/models/padim/model.py b/anomalib/models/padim/model.py index db21b41eb6..ec4cd89e99 100644 --- a/anomalib/models/padim/model.py +++ b/anomalib/models/padim/model.py @@ -1,6 +1,6 @@ -""" -PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization -https://arxiv.org/abs/2011.08785 +"""PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization. + +Paper https://arxiv.org/abs/2011.08785 """ # Copyright (C) 2020 Intel Corporation @@ -42,8 +42,7 @@ class PadimModel(nn.Module): - """ - Padim Module + """Padim Module. Args: layers (List[str]): Layers used for feature extraction @@ -93,24 +92,24 @@ def forward(self, input_tensor: Tensor) -> Tensor: """Forward-pass image-batch (N, C, H, W) into model to extract features. Args: - input_tensor: Image-batch (N, C, H, W) - input_tensor: Tensor: + input_tensor: Image-batch (N, C, H, W) + input_tensor: Tensor: Returns: - Features from single/multiple layers. - - :Example: - - >>> x = torch.randn(32, 3, 224, 224) - >>> features = self.extract_features(input_tensor) - >>> features.keys() - dict_keys(['layer1', 'layer2', 'layer3']) + Features from single/multiple layers. - >>> [v.shape for v in features.values()] - [torch.Size([32, 64, 56, 56]), - torch.Size([32, 128, 28, 28]), - torch.Size([32, 256, 14, 14])] + Example: + >>> x = torch.randn(32, 3, 224, 224) + >>> features = self.extract_features(input_tensor) + >>> features.keys() + dict_keys(['layer1', 'layer2', 'layer3']) + + >>> [v.shape for v in features.values()] + [torch.Size([32, 64, 56, 56]), + torch.Size([32, 128, 28, 28]), + torch.Size([32, 256, 14, 14])] """ + if self.apply_tiling: input_tensor = self.tiler.tile(input_tensor) with torch.no_grad(): @@ -129,17 +128,17 @@ def forward(self, input_tensor: Tensor) -> Tensor: return output def generate_embedding(self, features: Dict[str, Tensor]) -> Tensor: - """Generate embedding from hierarchical feature map + """Generate embedding from hierarchical feature map. Args: - features: Hierarchical feature map from a CNN (ResNet18 or WideResnet) - features: Dict[str: - Tensor]: + features: Hierarchical feature map from a CNN (ResNet18 or WideResnet) + features: Dict[str: + Tensor]: Returns: - Embedding vector - + Embedding vector """ + embeddings = features[self.layers[0]] for layer in self.layers[1:]: layer_embedding = features[layer] @@ -153,7 +152,7 @@ def generate_embedding(self, features: Dict[str, Tensor]) -> Tensor: class AnomalyMapGenerator: - """Generate Anomaly Heatmap""" + """Generate Anomaly Heatmap.""" def __init__(self, image_size: Union[ListConfig, Tuple], sigma: int = 4): self.image_size = image_size if isinstance(image_size, tuple) else tuple(image_size) @@ -161,8 +160,8 @@ def __init__(self, image_size: Union[ListConfig, Tuple], sigma: int = 4): @staticmethod def compute_distance(embedding: Tensor, stats: List[Tensor]) -> Tensor: - """ - Compute anomaly score to the patch in position(i,j) of a test image + """Compute anomaly score to the patch in position(i,j) of a test image. + Ref: Equation (2), Section III-C of the paper. 
Args: @@ -173,8 +172,7 @@ def compute_distance(embedding: Tensor, stats: List[Tensor]) -> Tensor: stats: List[Tensor]: Returns: - Anomaly score of a test image via mahalanobis distance. - + Anomaly score of a test image via mahalanobis distance. """ batch, channel, height, width = embedding.shape @@ -191,8 +189,7 @@ def compute_distance(embedding: Tensor, stats: List[Tensor]) -> Tensor: return distances def up_sample(self, distance: Tensor) -> Tensor: - """ - Up sample anomaly score to match the input image size. + """Up sample anomaly score to match the input image size. Args: distance: Anomaly score computed via the mahalanobis distance. @@ -200,7 +197,6 @@ def up_sample(self, distance: Tensor) -> Tensor: Returns: Resized distance matrix matching the input image size - """ score_map = F.interpolate( @@ -212,8 +208,7 @@ def up_sample(self, distance: Tensor) -> Tensor: return score_map def smooth_anomaly_map(self, anomaly_map: Tensor) -> Tensor: - """ - Apply gaussian smoothing to the anomaly map + """Apply gaussian smoothing to the anomaly map. Args: anomaly_map: Anomaly score for the test image(s) @@ -221,17 +216,18 @@ def smooth_anomaly_map(self, anomaly_map: Tensor) -> Tensor: Returns: Filtered anomaly scores - """ + kernel_size = 2 * int(4.0 * self.sigma + 0.5) + 1 anomaly_map = gaussian_blur2d(anomaly_map, (kernel_size, kernel_size), sigma=(self.sigma, self.sigma)) return anomaly_map def compute_anomaly_map(self, embedding: Tensor, mean: Tensor, inv_covariance: Tensor) -> Tensor: - """ - Compute anomaly score based on embedding vector, mean and inv_covariance of the multivariate - gaussian distribution. + """Compute anomaly score. + + Scores are calculated based on embedding vector, mean and inv_covariance of the multivariate gaussian + distribution. Args: embedding: Embedding vector extracted from the test set. @@ -243,7 +239,6 @@ def compute_anomaly_map(self, embedding: Tensor, mean: Tensor, inv_covariance: T Returns: Output anomaly score. - """ score_map = self.compute_distance( @@ -256,9 +251,9 @@ def compute_anomaly_map(self, embedding: Tensor, mean: Tensor, inv_covariance: T return smoothed_anomaly_map def __call__(self, **kwds): - """ - Returns anomaly_map. - Expects `embedding`, `mean` and `covariance` keywords to be passed explicitly + """Returns anomaly_map. + + Expects `embedding`, `mean` and `covariance` keywords to be passed explicitly. Example: >>> anomaly_map_generator = AnomalyMapGenerator(image_size=input_size) @@ -270,6 +265,7 @@ def __call__(self, **kwds): Returns: torch.Tensor: anomaly map """ + if not ("embedding" in kwds and "mean" in kwds and "inv_covariance" in kwds): raise ValueError(f"Expected keys `embedding`, `mean` and `covariance`. Found {kwds.keys()}") @@ -281,9 +277,7 @@ def __call__(self, **kwds): class PadimLightning(AnomalyModule): - """ - PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization - """ + """PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization.""" def __init__(self, hparams): super().__init__(hparams) @@ -306,8 +300,7 @@ def configure_optimizers(): return None def training_step(self, batch, _): # pylint: disable=arguments-differ - """Training Step of PADIM. - For each batch, hierarchical features are extracted from the CNN. + """Training Step of PADIM. For each batch, hierarchical features are extracted from the CNN. 
Args: batch: Input batch @@ -315,29 +308,29 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ Returns: Hierarchical feature map - """ + self.model.feature_extractor.eval() embeddings = self.model(batch["image"]) return {"embeddings": embeddings.cpu()} - def training_epoch_end(self, outputs): + def training_epoch_end(self, outputs) -> None: """Fit a multivariate gaussian model on an embedding extracted from deep hierarchical CNN features. Args: outputs: Batch of outputs from the training step Returns: - + None """ + embeddings = torch.vstack([x["embeddings"] for x in outputs]) self.stats = self.model.gaussian.fit(embeddings) def validation_step(self, batch, _): # pylint: disable=arguments-differ - """ - Validation Step of PADIM. - Similar to the training step, hierarchical features - are extracted from the CNN for each batch. + """Validation Step of PADIM. + + Similar to the training step, hierarchical features are extracted from the CNN for each batch. Args: batch: Input batch @@ -346,8 +339,8 @@ def validation_step(self, batch, _): # pylint: disable=arguments-differ Returns: Dictionary containing images, features, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. - """ + batch["anomaly_maps"] = self.model(batch["image"]) return batch diff --git a/anomalib/models/patchcore/__init__.py b/anomalib/models/patchcore/__init__.py index 3b9b60133a..547a90b152 100644 --- a/anomalib/models/patchcore/__init__.py +++ b/anomalib/models/patchcore/__init__.py @@ -1,6 +1,4 @@ -""" -PatchCore model -""" +"""PatchCore model.""" # Copyright (C) 2020 Intel Corporation # diff --git a/anomalib/models/patchcore/model.py b/anomalib/models/patchcore/model.py index fb3edb540a..b1bb8f996e 100644 --- a/anomalib/models/patchcore/model.py +++ b/anomalib/models/patchcore/model.py @@ -1,7 +1,4 @@ -""" -Towards Total Recall in Industrial Anomaly Detection -https://arxiv.org/abs/2106.08265 -""" +"""Towards Total Recall in Industrial Anomaly Detection https://arxiv.org/abs/2106.08265.""" # Copyright (C) 2020 Intel Corporation # @@ -40,9 +37,7 @@ class AnomalyMapGenerator: - """ - Generate Anomaly Heatmap - """ + """Generate Anomaly Heatmap.""" def __init__( self, @@ -53,8 +48,7 @@ def __init__( self.sigma = sigma def compute_anomaly_map(self, score_patches: np.ndarray) -> np.ndarray: - """ - Pixel Level Anomaly Heatmap + """Pixel Level Anomaly Heatmap. Args: score_patches (np.ndarray): [description] @@ -67,8 +61,7 @@ def compute_anomaly_map(self, score_patches: np.ndarray) -> np.ndarray: @staticmethod def compute_anomaly_score(patch_scores: np.ndarray) -> np.ndarray: - """ - Compute Image-Level Anomaly Score + """Compute Image-Level Anomaly Score. Args: patch_scores (np.ndarray): [description] @@ -79,8 +72,8 @@ def compute_anomaly_score(patch_scores: np.ndarray) -> np.ndarray: return score def __call__(self, **kwds: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: - """ - Returns anomaly_map and anomaly_score. + """Returns anomaly_map and anomaly_score. + Expects `patch_scores` keyword to be passed explicitly Example @@ -104,9 +97,7 @@ def __call__(self, **kwds: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: class PatchcoreModel(DynamicBufferModule, nn.Module): - """ - Patchcore Module - """ + """Patchcore Module.""" def __init__( self, @@ -138,10 +129,12 @@ def __init__( self.memory_bank: torch.Tensor def forward(self, input_tensor: Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: - """ - Get features from a CNN. 
- Generate embedding based on the feautures. - Compute anomaly map in test mode. + """Return Embedding during training, or a tuple of anomaly map and anomaly score during testing. + + Steps performed: + 1. Get features from a CNN. + 2. Generate embedding based on the features. + 3. Compute anomaly map in test mode. Args: input_tensor (Tensor): Input tensor @@ -175,8 +168,7 @@ def forward(self, input_tensor: Tensor) -> Union[torch.Tensor, Tuple[torch.Tenso return output def generate_embedding(self, features: Dict[str, Tensor]) -> torch.Tensor: - """ - Generate embedding from hierarchical feature map + """Generate embedding from hierarchical feature map. Args: features: Hierarchical feature map from a CNN (ResNet18 or WideResnet) @@ -184,7 +176,6 @@ def generate_embedding(self, features: Dict[str, Tensor]) -> torch.Tensor: Returns: Embedding vector - """ embeddings = features[self.layers[0]] @@ -197,7 +188,8 @@ def generate_embedding(self, features: Dict[str, Tensor]) -> torch.Tensor: @staticmethod def reshape_embedding(embedding: Tensor) -> Tensor: - """ + """Reshape Embedding. + Reshapes Embedding to the following format: [Batch, Embedding, Patch, Patch] to [Batch*Patch*Patch, Embedding] @@ -213,8 +205,7 @@ def reshape_embedding(embedding: Tensor) -> Tensor: @staticmethod def subsample_embedding(embedding: torch.Tensor, sampling_ratio: float) -> torch.Tensor: - """ - Subsample embedding based on coreset sampling + """Subsample embedding based on coreset sampling. Args: embedding (np.ndarray): Embedding tensor from the CNN @@ -234,8 +225,7 @@ def subsample_embedding(embedding: torch.Tensor, sampling_ratio: float) -> torch class PatchcoreLightning(AnomalyModule): - """ - PatchcoreLightning Module to train PatchCore algorithm + """PatchcoreLightning Module to train PatchCore algorithm. Args: layers (List[str]): Layers used for feature extraction @@ -260,8 +250,7 @@ def __init__(self, hparams): self.automatic_optimization = False def configure_optimizers(self): - """ - Configure optimizers + """Configure optimizers. Returns: None: Do not set optimizers by returning None. @@ -269,8 +258,7 @@ def configure_optimizers(self): return None def training_step(self, batch, _): # pylint: disable=arguments-differ - """ - Generate feature embedding of the batch. + """Generate feature embedding of the batch. Args: batch (Dict[str, Any]): Batch containing image filename, @@ -286,8 +274,8 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ return {"embedding": embedding} def training_epoch_end(self, outputs): - """ - Concatenate batch embeddings to generate normal embedding. + """Concatenate batch embeddings to generate normal embedding. + Apply coreset subsampling to the embedding set for dimensionality reduction. Args: @@ -302,10 +290,7 @@ def training_epoch_end(self, outputs): self.model.memory_bank = embedding def validation_step(self, batch, _): # pylint: disable=arguments-differ - """ - Load the normal embedding to use it as memory bank. - Apply nearest neighborhood to the embedding. - Generate the anomaly map. + """Get batch of anomaly maps from input image batch. 
Args: batch (Dict[str, Any]): Batch containing image filename, diff --git a/anomalib/models/patchcore/utils/__init__.py b/anomalib/models/patchcore/utils/__init__.py index e69de29bb2..558f87830e 100644 --- a/anomalib/models/patchcore/utils/__init__.py +++ b/anomalib/models/patchcore/utils/__init__.py @@ -0,0 +1 @@ +"""Helper utilities for PatchCore model.""" diff --git a/anomalib/models/patchcore/utils/sampling/__init__.py b/anomalib/models/patchcore/utils/sampling/__init__.py index 378e56412b..549ddfe47d 100644 --- a/anomalib/models/patchcore/utils/sampling/__init__.py +++ b/anomalib/models/patchcore/utils/sampling/__init__.py @@ -1,6 +1,4 @@ -""" -Patchcore sampling utils -""" +"""Patchcore sampling utils.""" from .k_center_greedy import KCenterGreedy from .nearest_neighbors import NearestNeighbors diff --git a/anomalib/models/patchcore/utils/sampling/k_center_greedy.py b/anomalib/models/patchcore/utils/sampling/k_center_greedy.py index a755bb02a5..39cfb13dfc 100644 --- a/anomalib/models/patchcore/utils/sampling/k_center_greedy.py +++ b/anomalib/models/patchcore/utils/sampling/k_center_greedy.py @@ -1,9 +1,8 @@ -""" -This module comprises PatchCore Sampling Methods for the embedding. - - k Center Greedy Method - Returns points that minimizes the maximum distance of any point to a center. - . https://arxiv.org/abs/1708.00489 +"""This module comprises PatchCore Sampling Methods for the embedding. +- k Center Greedy Method + Returns points that minimizes the maximum distance of any point to a center. + . https://arxiv.org/abs/1708.00489 """ from typing import List, Optional @@ -16,8 +15,7 @@ class KCenterGreedy: - """ - Implements k-center-greedy method + """Implements k-center-greedy method. Args: model: model with scikit-like API with decision_function. Defaults to SparseRandomProjection. @@ -45,14 +43,11 @@ def __init__(self, model: SparseRandomProjection, embedding: Tensor, sampling_ra self.already_selected_idxs: List[int] = [] def reset_distances(self) -> None: - """ - Reset minimum distances - """ + """Reset minimum distances.""" self.min_distances = None def get_new_cluster_centers(self, cluster_centers: List[int]) -> List[int]: - """ - Get new cluster center indexes from the list of cluster indexes. + """Get new cluster center indexes from the list of cluster indexes. Args: cluster_centers (List[int]): List of cluster center indexes. @@ -63,8 +58,7 @@ def get_new_cluster_centers(self, cluster_centers: List[int]) -> List[int]: return [d for d in cluster_centers if d not in self.already_selected_idxs] def update_distances(self, cluster_centers: List[int]) -> None: - """ - Update min distances given cluster centers. + """Update min distances given cluster centers. Args: cluster_centers (List[int]): indices of cluster centers @@ -82,9 +76,9 @@ def update_distances(self, cluster_centers: List[int]) -> None: self.min_distances = torch.minimum(self.min_distances, distance) def get_new_idx(self) -> int: - """ - Get index value of a sample based on (i) either minimum distance of the cluster - or (ii) random subsampling from the embedding. + """Get index value of a sample. + + Based on (i) either minimum distance of the cluster or (ii) random subsampling from the embedding. Returns: int: Sample index @@ -102,8 +96,7 @@ def get_new_idx(self) -> int: return idx def select_coreset_idxs(self, selected_idxs: Optional[List[int]] = None) -> List[int]: - """ - Greedily form a coreset to minimize the maximum distance of a cluster. 
+ """Greedily form a coreset to minimize the maximum distance of a cluster. Args: selected_idxs: index of samples already selected. Defaults to an empty set. @@ -136,8 +129,7 @@ def select_coreset_idxs(self, selected_idxs: Optional[List[int]] = None) -> List return selected_coreset_idxs def sample_coreset(self, selected_idxs: Optional[List[int]] = None) -> Tensor: - """ - Select coreset from the embedding + """Select coreset from the embedding. Args: selected_idxs: index of samples already selected. Defaults to an empty set. diff --git a/anomalib/models/patchcore/utils/sampling/nearest_neighbors.py b/anomalib/models/patchcore/utils/sampling/nearest_neighbors.py index 2ae8db344f..d5b7857389 100644 --- a/anomalib/models/patchcore/utils/sampling/nearest_neighbors.py +++ b/anomalib/models/patchcore/utils/sampling/nearest_neighbors.py @@ -1,7 +1,6 @@ -""" -This module comprises PatchCore Sampling Methods for the embedding. - - Nearest Neighbours +"""This module comprises PatchCore Sampling Methods for the embedding. +- Nearest Neighbours """ # Copyright (C) 2020 Intel Corporation @@ -27,8 +26,7 @@ class NearestNeighbors(DynamicBufferModule): - """ - Nearest Neighbours using brute force method and euclidean norm + """Nearest Neighbours using brute force method and euclidean norm. Args: n_neighbors (int): Number of neighbors to look at @@ -42,8 +40,7 @@ def __init__(self, n_neighbors: int): self._fit_x: Tensor def fit(self, train_features: Tensor): - """ - Saves the train features for NN search later + """Saves the train features for NN search later. Args: train_features (Tensor): Training data @@ -51,8 +48,9 @@ def fit(self, train_features: Tensor): self._fit_x = train_features def kneighbors(self, test_features: Tensor) -> Tuple[Tensor, Tensor]: - """ - Return k-nearest neighbors. It is calculated based on bruteforce method. + """Return k-nearest neighbors. + + It is calculated based on bruteforce method. Args: test_features (Tensor): test data diff --git a/anomalib/models/patchcore/utils/sampling/random_projection.py b/anomalib/models/patchcore/utils/sampling/random_projection.py index 51a1710147..6cd2231911 100644 --- a/anomalib/models/patchcore/utils/sampling/random_projection.py +++ b/anomalib/models/patchcore/utils/sampling/random_projection.py @@ -1,8 +1,7 @@ -""" -This module comprises PatchCore Sampling Methods for the embedding. - - Random Sparse Projector - Sparse Random Projection using PyTorch Operations +"""This module comprises PatchCore Sampling Methods for the embedding. +- Random Sparse Projector + Sparse Random Projection using PyTorch Operations """ # Copyright (C) 2020 Intel Corporation @@ -28,14 +27,11 @@ class NotFittedError(ValueError, AttributeError): - """ - Raise Exception if estimator is used before fitting - """ + """Raise Exception if estimator is used before fitting.""" class SparseRandomProjection: - """ - Sparse Random Projection using PyTorch operations + """Sparse Random Projection using PyTorch operations. Args: eps (float, optional): Minimum distortion rate parameter for calculating @@ -51,9 +47,7 @@ def __init__(self, eps: float = 0.1, random_state: Optional[int] = None) -> None self.random_state = random_state def _sparse_random_matrix(self, n_features: int): - """ - Random sparse matrix. - Based on https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf + """Random sparse matrix. Based on https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf. 
Args:
             n_features (int): Dimensionality of the original source space
@@ -95,12 +89,12 @@ def _sparse_random_matrix(self, n_features: int):
         return components

     def johnson_lindenstrauss_min_dim(self, n_samples: int, eps: float = 0.1):
-        """
-        Find a 'safe' number of components to randomly project to
+        """Find a 'safe' number of components to randomly project to.
+
         Ref eqn 2.1 https://cseweb.ucsd.edu/~dasgupta/papers/jl.pdf

         Args:
-            n_namples (int): Number of samples used to compute safe components
+            n_samples (int): Number of samples used to compute safe components
             eps (float, optional): Minimum distortion rate. Defaults to 0.1.
         """

@@ -108,8 +102,7 @@ def johnson_lindenstrauss_min_dim(self, n_samples: int, eps: float = 0.1):
         return (4 * np.log(n_samples) / denominator).astype(np.int64)

     def fit(self, embedding: Tensor) -> "SparseRandomProjection":
-        """
-        Generates sparse matrix from the embedding tensor
+        """Generates sparse matrix from the embedding tensor.

         Args:
             embedding (Tensor): embedding tensor for generating embedding
@@ -133,8 +126,7 @@ def fit(self, embedding: Tensor) -> "SparseRandomProjection":
         return self

     def transform(self, embedding: Tensor) -> Tensor:
-        """
-        Project the data by using matrix product with the random matrix
+        """Project the data by using matrix product with the random matrix.

         Args:
             embedding (Tensor): Embedding of shape (n_samples, n_features)
@@ -144,7 +136,6 @@

         Returns:
             projected_embedding (Tensor): Sparse matrix of shape
                 (n_samples, n_components) Projected array.
         """
-
         if self.sparse_random_matrix is None:
             raise NotFittedError("`fit()` has not been called on SparseRandomProjection yet.")
diff --git a/anomalib/models/stfpm/__init__.py b/anomalib/models/stfpm/__init__.py
index e470f0fb32..4da5c046ca 100644
--- a/anomalib/models/stfpm/__init__.py
+++ b/anomalib/models/stfpm/__init__.py
@@ -1,6 +1,4 @@
-"""
-STFPM Model
-"""
+"""STFPM Model."""

 # Copyright (C) 2020 Intel Corporation
 #
diff --git a/anomalib/models/stfpm/model.py b/anomalib/models/stfpm/model.py
index 7772a3b62c..d0885bff25 100644
--- a/anomalib/models/stfpm/model.py
+++ b/anomalib/models/stfpm/model.py
@@ -1,5 +1,5 @@
-"""
-STFPM: Student-Teacher Feature Pyramid Matching for Unsupervised Anomaly Detection
+"""STFPM: Student-Teacher Feature Pyramid Matching for Unsupervised Anomaly Detection.
+
 https://arxiv.org/abs/2103.04257
 """

@@ -34,26 +34,23 @@


 class Loss(nn.Module):
-    """
-    Feature Pyramid Loss
-    This class implmenents the feature pyramid loss function proposed in STFPM [1] paper.
+    """Feature Pyramid Loss. This class implements the feature pyramid loss function proposed in the STFPM [1] paper.
Example:
-
-        >>> from anomalib.core.model.feature_extractor import FeatureExtractor
-        >>> from anomalib.models.stfpm.model import Loss
-        >>> from torchvision.models import resnet18
-
-        >>> layers = ['layer1', 'layer2', 'layer3']
-        >>> teacher_model = FeatureExtractor(model=resnet18(pretrained=True), layers=layers)
-        >>> student_model = FeatureExtractor(model=resnet18(pretrained=False), layers=layers)
-        >>> loss = Loss()
-
-        >>> inp = torch.rand((4, 3, 256, 256))
-        >>> teacher_features = teacher_model(inp)
-        >>> student_features = student_model(inp)
-        >>> loss(student_features, teacher_features)
-        tensor(51.2015, grad_fn=)
+        >>> from anomalib.core.model.feature_extractor import FeatureExtractor
+        >>> from anomalib.models.stfpm.model import Loss
+        >>> from torchvision.models import resnet18
+
+        >>> layers = ['layer1', 'layer2', 'layer3']
+        >>> teacher_model = FeatureExtractor(model=resnet18(pretrained=True), layers=layers)
+        >>> student_model = FeatureExtractor(model=resnet18(pretrained=False), layers=layers)
+        >>> loss = Loss()
+
+        >>> inp = torch.rand((4, 3, 256, 256))
+        >>> teacher_features = teacher_model(inp)
+        >>> student_features = student_model(inp)
+        >>> loss(student_features, teacher_features)
+        tensor(51.2015, grad_fn=)
     """

     def __init__(self):
@@ -71,7 +68,6 @@

         Returns:
             L2 distance between teacher and student features.
-
         """
         height, width = teacher_feats.shape[2:]
@@ -83,8 +79,7 @@ def compute_layer_loss(self, teacher_feats: Tensor, student_feats: Tensor) -> Te
         return layer_loss

     def forward(self, teacher_features: Dict[str, Tensor], student_features: Dict[str, Tensor]) -> Tensor:
-        """Compute the overall loss via the weighted average of
-        the layer losses computed by the cosine similarity.
+        """Compute the overall loss via the weighted average of the layer losses computed by the cosine similarity.

         Args:
             teacher_features: Teacher features
@@ -95,7 +90,6 @@

         Returns:
             Total loss, which is the weighted average of the layer losses.
-
         """

         layer_losses: List[Tensor] = []
@@ -109,7 +103,7 @@


 class AnomalyMapGenerator:
-    """Generate Anomaly Heatmap"""
+    """Generate Anomaly Heatmap."""

     def __init__(
         self,
@@ -129,7 +123,6 @@

         Returns:
             Anomaly score based on cosine similarity.
-
         """
         norm_teacher_features = F.normalize(teacher_features)
         norm_student_features = F.normalize(student_features)
@@ -141,8 +134,7 @@ def compute_layer_map(self, teacher_features: Tensor, student_features: Tensor)

     def compute_anomaly_map(
         self, teacher_features: Dict[str, Tensor], student_features: Dict[str, Tensor]
     ) -> torch.Tensor:
-        """
-        Compute the overall anomaly map via element-wise production the interpolated anomaly maps.
+        """Compute the overall anomaly map via element-wise product of the interpolated anomaly maps.

         Args:
             teacher_features: Teacher features
@@ -163,13 +155,16 @@ def compute_anomaly_map(

         return anomaly_map

     def __call__(self, **kwds: Dict[str, Tensor]) -> torch.Tensor:
-        """
-        Returns anomaly_map.
-        Expects `teach_features` and `student_features` keywords to be passed explicitly
+        """Returns anomaly map.
+
+        Expects `teacher_features` and `student_features` keywords to be passed explicitly.
     def __call__(self, **kwds: Dict[str, Tensor]) -> torch.Tensor:
-        """
-        Returns anomaly_map.
-        Expects `teach_features` and `student_features` keywords to be passed explicitly
+        """Returns anomaly map.
+
+        Expects `teacher_features` and `student_features` keywords to be passed explicitly.

-        Example
-        >>> anomaly_map_generator = AnomalyMapGenerator(image_size=tuple(hparams.model.input_size))
-        >>> output = self.anomaly_map_generator(teacher_features=teacher_features, student_features=student_features)
+        Example:
+        >>> anomaly_map_generator = AnomalyMapGenerator(image_size=tuple(hparams.model.input_size))
+        >>> output = self.anomaly_map_generator(
+                teacher_features=teacher_features,
+                student_features=student_features
+            )

         Raises:
             ValueError: `teach_features` and `student_features` keys are not found
@@ -188,8 +183,7 @@

 class STFPMModel(nn.Module):
-    """
-    STFPM: Student-Teacher Feature Pyramid Matching for Unsupervised Anomaly Detection
+    """STFPM: Student-Teacher Feature Pyramid Matching for Unsupervised Anomaly Detection.

     Args:
         layers (List[str]): Layers used for feature extraction
@@ -229,9 +223,9 @@ def __init__(
         self.anomaly_map_generator = AnomalyMapGenerator(image_size=tuple(input_size))

     def forward(self, images):
-        """
-        Forward-pass images into the network. During the training mode
-        the model extracts the features from the teacher and student networks.
+        """Forward-pass images into the network.
+
+        During the training mode the model extracts the features from the teacher and student networks.
         During the evaluation mode, it returns the predicted anomaly map.

         Args:
@@ -239,7 +233,6 @@ def forward(self, images):

         Returns:
             Teacher and student features when in training mode, otherwise the predicted anomaly maps.
-
         """
         if self.apply_tiling:
             images = self.tiler.tile(images)
@@ -256,9 +249,7 @@

 class StfpmLightning(AnomalyModule):
-    """
-    PL Lightning Module for the STFPM algorithm.
-    """
+    """PL Lightning Module for the STFPM algorithm."""

     def __init__(self, hparams):
         super().__init__(hparams)
@@ -274,9 +265,7 @@ def __init__(self, hparams):
         self.loss_val = 0

     def configure_callbacks(self):
-        """
-        Configure model-specific callbacks.
-        """
+        """Configure model-specific callbacks."""
         early_stopping = EarlyStopping(
             monitor=self.hparams.model.early_stopping.metric,
             patience=self.hparams.model.early_stopping.patience,
@@ -284,16 +273,11 @@
         )
         return [early_stopping]

-    def configure_optimizers(self):
-        """
-        Configure optimizers by creating an SGD optimizer.
-
-        :return: SGD optimizer
-
-        Args:
+    def configure_optimizers(self) -> torch.optim.Optimizer:
+        """Configure optimizers by creating an SGD optimizer.

         Returns:
-
+            (Optimizer): SGD optimizer
         """
         return optim.SGD(
             params=self.model.student_model.parameters(),
@@ -303,10 +287,9 @@

     def training_step(self, batch, _):  # pylint: disable=arguments-differ
-        """
-        Training Step of STFPM..
-        For each batch, teacher and student and teacher features
-        are extracted from the CNN.
+        """Training Step of STFPM.
+
+        For each batch, teacher and student features are extracted from the CNN.

         Args:
             batch: Input batch
@@ -314,7 +297,6 @@ def training_step(self, batch, _):  # pylint: disable=arguments-differ

         Returns:
           Hierarchical feature map
-
         """
         self.model.teacher_model.eval()
         teacher_features, student_features = self.model.forward(batch["image"])
@@ -323,9 +305,10 @@
         return {"loss": loss}

     def validation_step(self, batch, _):  # pylint: disable=arguments-differ
-        """
-        Validation Step of STFPM. Similar to the training step, student/teacher
-        features are extracted from the CNN for each batch, and anomaly map is computed.
+        """Validation Step of STFPM.
+
+        Similar to the training step, student/teacher features are extracted from the CNN for each batch, and
+        the anomaly map is computed.

         Args:
           batch: Input batch
@@ -334,7 +317,6 @@ def validation_step(self, batch, _):  # pylint: disable=arguments-differ

         Returns:
           Dictionary containing images, anomaly maps, true labels and masks.
          These are required in `validation_epoch_end` for feature concatenation.
-
        """
         batch["anomaly_maps"] = self.model(batch["image"])
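The relationship spelled out in these docstrings (frozen teacher, SGD on the student only) can be sketched as follows; `model`, `images` and `loss_fn` are assumed to be in scope, and the hyperparameter values are illustrative rather than taken from any config:

>>> import torch.optim as optim
>>> model.teacher_model.eval()  # the teacher stays frozen throughout training
>>> optimizer = optim.SGD(
...     params=model.student_model.parameters(),  # only the student is optimised
...     lr=0.4, momentum=0.9, weight_decay=1e-4,  # illustrative values
... )
>>> teacher_features, student_features = model.forward(images)
>>> loss = loss_fn(teacher_features, student_features)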
diff --git a/anomalib/utils/__init__.py b/anomalib/utils/__init__.py
index d11c3fb1ca..4c9027199a 100644
--- a/anomalib/utils/__init__.py
+++ b/anomalib/utils/__init__.py
@@ -1,6 +1,4 @@
-"""
-This modules contains helpers for downloading files, calculating metrics, computing anomaly maps, and visualization
-"""
+"""Helpers for downloading files, calculating metrics, computing anomaly maps, and visualization."""

 # Copyright (C) 2020 Intel Corporation
 #
diff --git a/anomalib/utils/download_progress_bar.py b/anomalib/utils/download_progress_bar.py
index 4bb681710d..26af24834a 100644
--- a/anomalib/utils/download_progress_bar.py
+++ b/anomalib/utils/download_progress_bar.py
@@ -1,5 +1,6 @@
-"""
-Helper to show progress bars with `urlretrieve`. Based on https://stackoverflow.com/a/53877507
+"""Helper to show progress bars with `urlretrieve`.
+
+Based on https://stackoverflow.com/a/53877507
 """

 # Copyright (C) 2020 Intel Corporation
@@ -23,93 +24,93 @@

 class DownloadProgressBar(tqdm):
-    """
-    Create progress bar for urlretrieve.
-    Subclasses `tqdm`. For information about the parameters in constructor, refer to `tqdm`'s documentation.
+    """Create progress bar for urlretrieve. Subclasses `tqdm`.
+
+    For information about the parameters in constructor, refer to `tqdm`'s documentation.

     Args:
-        iterable (Optional[Iterable]): Iterable to decorate with a progressbar.
-            Leave blank to manually manage the updates.
-        desc (Optional[str]): Prefix for the progressbar.
-        total (Optional[Union[int, float]]): The number of expected iterations. If unspecified,
-            len(iterable) is used if possible. If float("inf") or as a last
-            resort, only basic progress statistics are displayed
-            (no ETA, no progressbar).
-            If `gui` is True and this parameter needs subsequent updating,
-            specify an initial arbitrary large positive number,
-            e.g. 9e9.
-        leave (Optional[bool]): upon termination of iteration. If `None`, will leave only if `position` is `0`.
-        file (Optional[Union[io.TextIOWrapper, io.StringIO]]): Specifies where to output the progress messages
-            (default: sys.stderr). Uses `file.write(str)` and
-            `file.flush()` methods. For encoding, see
-            `write_bytes`.
-        ncols (Optional[int]): The width of the entire output message. If specified,
-            dynamically resizes the progressbar to stay within this bound.
-            If unspecified, attempts to use environment width. The
-            fallback is a meter width of 10 and no limit for the counter and
-            statistics. If 0, will not print any meter (only stats).
-        mininterval (Optional[float]): Minimum progress display update interval [default: 0.1] seconds.
-        maxinterval (Optional[float]): Maximum progress display update interval [default: 10] seconds.
-            Automatically adjusts `miniters` to correspond to `mininterval`
-            after long display update lag. Only works if `dynamic_miniters`
-            or monitor thread is enabled.
- miniters (Optional[Union[int, float]]): Minimum progress display update interval, in iterations. - If 0 and `dynamic_miniters`, will automatically adjust to equal - `mininterval` (more CPU efficient, good for tight loops). - If > 0, will skip display of specified number of iterations. - Tweak this and `mininterval` to get very efficient loops. - If your progress is erratic with both fast and slow iterations - (network, skipping items, etc) you should set miniters=1. - use_ascii (Optional[Union[bool, str]]): If unspecified or False, use unicode (smooth blocks) to fill - the meter. The fallback is to use ASCII characters " 123456789#". - disable (Optional[bool]): Whether to disable the entire progressbar wrapper - [default: False]. If set to None, disable on non-TTY. - unit (Optional[str]): String that will be used to define the unit of each iteration - [default: it]. - unit_scale (Union[bool, int, float]): If 1 or True, the number of iterations will be reduced/scaled - automatically and a metric prefix following the - International System of Units standard will be added - (kilo, mega, etc.) [default: False]. If any other non-zero - number, will scale `total` and `n`. - dynamic_ncols (Optional[bool]): If set, constantly alters `ncols` and `nrows` to the - environment (allowing for window resizes) [default: False]. - smoothing (Optional[float]): Exponential moving average smoothing factor for speed estimates - (ignored in GUI mode). Ranges from 0 (average speed) to 1 - (current/instantaneous speed) [default: 0.3]. - bar_format (Optional[str]): Specify a custom bar string formatting. May impact performance. - [default: '{l_bar}{bar}{r_bar}'], where - l_bar='{desc}: {percentage:3.0f}%|' and - r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' - '{rate_fmt}{postfix}]' - Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, - percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, - rate, rate_fmt, rate_noinv, rate_noinv_fmt, - rate_inv, rate_inv_fmt, postfix, unit_divisor, - remaining, remaining_s, eta. - Note that a trailing ": " is automatically removed after {desc} - if the latter is empty. - initial (Optional[Union[int, float]]): The initial counter value. Useful when restarting a progress - bar [default: 0]. If using float, consider specifying `{n:.3f}` - or similar in `bar_format`, or specifying `unit_scale`. - position (Optional[int]): Specify the line offset to print this bar (starting from 0) - Automatic if unspecified. - Useful to manage multiple bars at once (eg, from threads). - postfix (Optional[Dict]): Specify additional stats to display at the end of the bar. - Calls `set_postfix(**postfix)` if possible (dict). - unit_divisor (Optional[float]): [default: 1000], ignored unless `unit_scale` is True. - write_bytes (Optional[bool]): If (default: None) and `file` is unspecified, - bytes will be written in Python 2. If `True` will also write - bytes. In all other cases will default to unicode. - lock_args (Optional[tuple]): Passed to `refresh` for intermediate output - (initialisation, iterating, and updating). - nrows (Optional[int]): The screen height. If specified, hides nested bars - outside this bound. If unspecified, attempts to use environment height. - The fallback is 20. - colour (Optional[str]): Bar colour (e.g. 'green', '#00ff00'). - delay (Optional[float]): Don't display until [default: 0] seconds have elapsed. - gui (Optional[bool]): WARNING: internal parameter - do not use. - Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use - matplotlib animations for a graphical output [default: False]. + iterable (Optional[Iterable]): Iterable to decorate with a progressbar. + Leave blank to manually manage the updates. + desc (Optional[str]): Prefix for the progressbar. + total (Optional[Union[int, float]]): The number of expected iterations. If unspecified, + len(iterable) is used if possible. If float("inf") or as a last + resort, only basic progress statistics are displayed + (no ETA, no progressbar). + If `gui` is True and this parameter needs subsequent updating, + specify an initial arbitrary large positive number, + e.g. 9e9. + leave (Optional[bool]): upon termination of iteration. If `None`, will leave only if `position` is `0`. + file (Optional[Union[io.TextIOWrapper, io.StringIO]]): Specifies where to output the progress messages + (default: sys.stderr). Uses `file.write(str)` and + `file.flush()` methods. For encoding, see + `write_bytes`. + ncols (Optional[int]): The width of the entire output message. If specified, + dynamically resizes the progressbar to stay within this bound. + If unspecified, attempts to use environment width. The + fallback is a meter width of 10 and no limit for the counter and + statistics. If 0, will not print any meter (only stats). + mininterval (Optional[float]): Minimum progress display update interval [default: 0.1] seconds. + maxinterval (Optional[float]): Maximum progress display update interval [default: 10] seconds. + Automatically adjusts `miniters` to correspond to `mininterval` + after long display update lag. Only works if `dynamic_miniters` + or monitor thread is enabled. + miniters (Optional[Union[int, float]]): Minimum progress display update interval, in iterations. + If 0 and `dynamic_miniters`, will automatically adjust to equal + `mininterval` (more CPU efficient, good for tight loops). + If > 0, will skip display of specified number of iterations. + Tweak this and `mininterval` to get very efficient loops. + If your progress is erratic with both fast and slow iterations + (network, skipping items, etc) you should set miniters=1. + use_ascii (Optional[Union[bool, str]]): If unspecified or False, use unicode (smooth blocks) to fill + the meter. The fallback is to use ASCII characters " 123456789#". + disable (Optional[bool]): Whether to disable the entire progressbar wrapper + [default: False]. If set to None, disable on non-TTY. + unit (Optional[str]): String that will be used to define the unit of each iteration + [default: it]. + unit_scale (Union[bool, int, float]): If 1 or True, the number of iterations will be reduced/scaled + automatically and a metric prefix following the + International System of Units standard will be added + (kilo, mega, etc.) [default: False]. If any other non-zero + number, will scale `total` and `n`. + dynamic_ncols (Optional[bool]): If set, constantly alters `ncols` and `nrows` to the + environment (allowing for window resizes) [default: False]. + smoothing (Optional[float]): Exponential moving average smoothing factor for speed estimates + (ignored in GUI mode). Ranges from 0 (average speed) to 1 + (current/instantaneous speed) [default: 0.3]. + bar_format (Optional[str]): Specify a custom bar string formatting. May impact performance. 
+            [default: '{l_bar}{bar}{r_bar}'], where
+            l_bar='{desc}: {percentage:3.0f}%|' and
+            r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
+            '{rate_fmt}{postfix}]'
+            Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
+            percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
+            rate, rate_fmt, rate_noinv, rate_noinv_fmt,
+            rate_inv, rate_inv_fmt, postfix, unit_divisor,
+            remaining, remaining_s, eta.
+            Note that a trailing ": " is automatically removed after {desc}
+            if the latter is empty.
+        initial (Optional[Union[int, float]]): The initial counter value. Useful when restarting a progress
+            bar [default: 0]. If using float, consider specifying `{n:.3f}`
+            or similar in `bar_format`, or specifying `unit_scale`.
+        position (Optional[int]): Specify the line offset to print this bar (starting from 0)
+            Automatic if unspecified.
+            Useful to manage multiple bars at once (eg, from threads).
+        postfix (Optional[Dict]): Specify additional stats to display at the end of the bar.
+            Calls `set_postfix(**postfix)` if possible (dict).
+        unit_divisor (Optional[float]): [default: 1000], ignored unless `unit_scale` is True.
+        write_bytes (Optional[bool]): If (default: None) and `file` is unspecified,
+            bytes will be written in Python 2. If `True` will also write
+            bytes. In all other cases will default to unicode.
+        lock_args (Optional[tuple]): Passed to `refresh` for intermediate output
+            (initialisation, iterating, and updating).
+        nrows (Optional[int]): The screen height. If specified, hides nested bars
+            outside this bound. If unspecified, attempts to use environment height.
+            The fallback is 20.
+        colour (Optional[str]): Bar colour (e.g. 'green', '#00ff00').
+        delay (Optional[float]): Don't display until [default: 0] seconds have elapsed.
+        gui (Optional[bool]): WARNING: internal parameter - do not use.
+            Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
+            matplotlib animations for a graphical output [default: False].

     Example:

@@ -179,8 +180,10 @@ def __init__(
         self.total: Optional[Union[int, float]]

     def update_to(self, chunk_number: int = 1, max_chunk_size: int = 1, total_size=None):
-        """Progress bar hook for tqdm. The implementor does not have to bother about passing parameters to this
-        as it gets them from urlretrieve. However the context needs a few parameters. Refer to the example.
+        """Progress bar hook for tqdm.
+
+        The implementor does not have to bother about passing parameters to this as it gets them from urlretrieve.
+        However the context needs a few parameters. Refer to the example.

         Args:
             chunk_number (int, optional): The current chunk being processed. Defaults to 1.
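The hook above is meant to be handed to `urlretrieve` as its `reporthook`, which supplies the chunk number, chunk size and total size on every callback. A minimal usage sketch (the URL and filename are placeholders):

>>> from urllib.request import urlretrieve
>>> from anomalib.utils.download_progress_bar import DownloadProgressBar
>>> url = "https://example.com/dataset.tar.xz"  # placeholder URL
>>> with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1]) as p_bar:
...     urlretrieve(url=url, filename="dataset.tar.xz", reporthook=p_bar.update_to)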
diff --git a/anomalib/utils/metrics.py b/anomalib/utils/metrics.py
index 86e360f958..80be27e05e 100644
--- a/anomalib/utils/metrics.py
+++ b/anomalib/utils/metrics.py
@@ -1,7 +1,4 @@
-"""
-Metrics
-This module contains metric-related util functions.
-"""
+"""Metrics. This module contains metric-related utility functions."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -28,9 +25,7 @@
 def compute_threshold_and_f1_score(
     ground_truth: Union[Tensor, np.ndarray], predictions: Union[Tensor, np.ndarray]
 ) -> Tuple[float, float]:
-    """
-    Compute adaptive threshold, based on the f1 metric of the
-    true labels and the predicted anomaly scores
+    """Compute adaptive threshold, based on the f1 metric of the true labels and the predicted anomaly scores.

     Args:
         ground_truth: Pixel-level or image-level ground truth labels.
@@ -47,7 +42,6 @@ def compute_threshold_and_f1_score(

     Returns:
         Threshold value based on the best f1 score.
         Value of the best f1 score.
-
     """

     precision, recall, thresholds = precision_recall_curve(ground_truth.flatten(), predictions.flatten())
diff --git a/anomalib/utils/post_process.py b/anomalib/utils/post_process.py
index 8d11af5e32..c0fe261e20 100644
--- a/anomalib/utils/post_process.py
+++ b/anomalib/utils/post_process.py
@@ -1,8 +1,4 @@
-"""
-Post Process
-This module contains utils function to apply post-processing
-to the output predictions.
-"""
+"""Post Process. This module contains utility functions to apply post-processing to the output predictions."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -25,8 +21,7 @@

 def anomaly_map_to_color_map(anomaly_map: np.ndarray, normalize: bool = True) -> np.ndarray:
-    """
-    Compute anomaly color heatmap
+    """Compute anomaly color heatmap.

     Args:
         anomaly_map (np.ndarray): Final anomaly map computed by the distance metric.
@@ -48,8 +43,7 @@ def anomaly_map_to_color_map(anomaly_map: np.ndarray, normalize: bool = True) ->

 def superimpose_anomaly_map(
     anomaly_map: np.ndarray, image: np.ndarray, alpha: float = 0.4, gamma: int = 0
 ) -> np.ndarray:
-    """
-    Superimpose anomaly map on top of in the input image
+    """Superimpose anomaly map on top of the input image.

     Args:
         anomaly_map (np.ndarray): Anomaly map
@@ -62,7 +56,7 @@
             I' = (alpha*I1 + (1-alpha)*I2) + gamma

     Returns:
-        np.ndarray: [description]
+        np.ndarray: Image with anomaly map superimposed on top of it.
     """

     anomaly_map = anomaly_map_to_color_map(anomaly_map.squeeze())
@@ -72,8 +66,7 @@

 def compute_mask(anomaly_map: np.ndarray, threshold: float, kernel_size: int = 4) -> np.ndarray:
-    """
-    Compute anomaly mask via thresholding the predicted anomaly map.
+    """Compute anomaly mask via thresholding the predicted anomaly map.

     Args:
         anomaly_map: Anomaly map predicted via the model
@@ -85,7 +78,6 @@ def compute_mask(anomaly_map: np.ndarray, threshold: float, kernel_size: int = 4

     Returns:
         Predicted anomaly mask
-
     """

     anomaly_map = anomaly_map.squeeze()
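Taken together, the two helpers above support a typical post-processing flow: blend the predicted map into the input per the weighted-sum formula in the docstring, then threshold it into a binary mask. A minimal sketch with dummy inputs (array shapes are assumptions):

>>> import numpy as np
>>> from anomalib.utils.post_process import compute_mask, superimpose_anomaly_map
>>> anomaly_map = np.random.rand(256, 256).astype(np.float32)  # dummy prediction
>>> image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)  # dummy input
>>> heat_map = superimpose_anomaly_map(anomaly_map, image, alpha=0.4, gamma=0)
>>> mask = compute_mask(anomaly_map, threshold=0.5, kernel_size=4)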
""" if index is None: index = self.figure_index @@ -57,14 +51,11 @@ def add_image(self, image: np.ndarray, title: str, color_map: Optional[str] = No self.axis[index].title.set_text(title) def show(self): - """ - Show image on a matplotlib figure. - """ + """Show image on a matplotlib figure.""" self.figure.show() def save(self, filename: Path): - """ - Save image. + """Save image. Args: filename: Path: Filename to save image @@ -73,7 +64,5 @@ def save(self, filename: Path): self.figure.savefig(filename, dpi=100) def close(self): - """ - Close figure. - """ + """Close figure.""" plt.close(self.figure) diff --git a/setup.py b/setup.py index 08f841392e..9704fae14e 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,4 @@ -""" -Setup file for anomalib -""" +"""Setup file for anomalib.""" # Copyright (C) 2020 Intel Corporation # diff --git a/tests/__init__.py b/tests/__init__.py index e2c2dd7cc6..756ffd04c9 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,6 +1,4 @@ -""" -Tests -""" +"""Tests.""" # Copyright (C) 2020 Intel Corporation # diff --git a/tests/config/__init__.py b/tests/config/__init__.py index 05e883f670..e3d7183a41 100644 --- a/tests/config/__init__.py +++ b/tests/config/__init__.py @@ -1,6 +1,4 @@ -""" -Tests for configuration getters/setters -""" +"""Tests for configuration getters/setters.""" # Copyright (C) 2020 Intel Corporation # diff --git a/tests/config/test_config.py b/tests/config/test_config.py index 95c8ce190b..6d5cf57069 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -1,6 +1,4 @@ -""" -Test Config Getter. -""" +"""Test Config Getter.""" # Copyright (C) 2020 Intel Corporation # @@ -22,21 +20,15 @@ class TestConfig: - """ - Test Config Getter - """ + """Test Config Getter.""" def test_get_configurable_parameters_return_correct_model_name(self): - """ - Configurable parameter should return the correct model name. - """ + """Configurable parameter should return the correct model name.""" model_name = "stfpm" configurable_parameters = get_configurable_parameters(model_name) assert configurable_parameters.model.name == model_name def test_get_configurable_parameter_fails_with_none_arguments(self): - """ - Configurable parameter should raise an error with none arguments. 
- """ + """Configurable parameter should raise an error with none arguments.""" with pytest.raises(ValueError): get_configurable_parameters() diff --git a/tests/core/callbacks/compress_callback/dummy_lightning_model.py b/tests/core/callbacks/compress_callback/dummy_lightning_model.py index 8db81c5f74..c3e0223856 100644 --- a/tests/core/callbacks/compress_callback/dummy_lightning_model.py +++ b/tests/core/callbacks/compress_callback/dummy_lightning_model.py @@ -42,8 +42,7 @@ def test_dataloader(self): class DummyModel(nn.Module): """Creates a very basic CNN model to fit image data for classification task - The test uses this to check if this model is converted to OpenVINO IR - """ + The test uses this to check if this model is converted to OpenVINO IR.""" def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__() @@ -68,7 +67,7 @@ def forward(self, x): class DummyLightningModule(pl.LightningModule): - """A dummy model which fits the torchvision FakeData dataset""" + """A dummy model which fits the torchvision FakeData dataset.""" def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__() diff --git a/tests/core/callbacks/compress_callback/test_compress.py b/tests/core/callbacks/compress_callback/test_compress.py index 45c55b7af8..238cd08255 100644 --- a/tests/core/callbacks/compress_callback/test_compress.py +++ b/tests/core/callbacks/compress_callback/test_compress.py @@ -13,7 +13,7 @@ def test_compress_model_callback(): - """Tests if an optimized model is created""" + """Tests if an optimized model is created.""" config = get_configurable_parameters(model_config_path="tests/core/callbacks/compress_callback/dummy_config.yml") @@ -21,7 +21,9 @@ def test_compress_model_callback(): config.project.path = tmp_dir model = DummyLightningModule(hparams=config) model.callbacks = [ - CompressModelCallback(config=config, dirpath=os.path.join(tmp_dir), filename="compressed_model"), + CompressModelCallback( + input_size=config.model.input_size, dirpath=os.path.join(tmp_dir), filename="compressed_model" + ), EarlyStopping(monitor=config.model.metric), ] datamodule = FakeDataModule() diff --git a/tests/core/callbacks/visualizer_callback/dummy_lightning_model.py b/tests/core/callbacks/visualizer_callback/dummy_lightning_model.py index 4d95581281..3abc43e597 100644 --- a/tests/core/callbacks/visualizer_callback/dummy_lightning_model.py +++ b/tests/core/callbacks/visualizer_callback/dummy_lightning_model.py @@ -44,7 +44,8 @@ def __init__(self): class DummyModule(AnomalyModule): - """A dummy model which calls visualizer callback on fake images and masks""" + """A dummy model which calls visualizer callback on fake images and + masks.""" def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__(hparams) @@ -59,7 +60,7 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): self.results.anomaly_maps = np.ones((2, 100, 100)) def test_step(self, batch, _): - """Only used to trigger on_test_epoch_end""" + """Only used to trigger on_test_epoch_end.""" self.log(name="loss", value=0.0, prog_bar=True) def test_step_end(self, test_step_outputs): diff --git a/tests/core/callbacks/visualizer_callback/test_visualizer.py b/tests/core/callbacks/visualizer_callback/test_visualizer.py index fcb1755813..200c91ea8f 100644 --- a/tests/core/callbacks/visualizer_callback/test_visualizer.py +++ b/tests/core/callbacks/visualizer_callback/test_visualizer.py @@ -23,7 +23,7 @@ def get_dummy_logger(config, tempdir): @pytest.mark.parametrize("dataset", ["segmentation"]) 
def test_add_images(dataset):
-    """Tests if tensorboard logs are generated"""
+    """Tests if tensorboard logs are generated."""
     with tempfile.TemporaryDirectory() as dir_loc:
         config = OmegaConf.create(
             {
diff --git a/tests/datasets/test_anomaly_dataset.py b/tests/datasets/test_anomaly_dataset.py
index c53bf7682d..bf9c5e3813 100644
--- a/tests/datasets/test_anomaly_dataset.py
+++ b/tests/datasets/test_anomaly_dataset.py
@@ -1,4 +1,4 @@
-"""Test Anomaly Dataset"""
+"""Test Anomaly Dataset."""

 import pytest
 import pytorch_lightning as pl
@@ -74,8 +74,10 @@
 @pytest.mark.parametrize("task", ["classification", "segmentation", "detection"])
 @TestDataset(num_train=200, num_test=10, path=get_dataset_path(), use_mvtec=False)
 def test_anomaly_dataset(task, path=get_dataset_path(), category="leather"):
-    """Test anomaly dataset using MVTec dataset
-    Used to check whether the dataloader works as intended. The category of the dataset does not matter.
-    """
+    """Test anomaly dataset using the MVTec dataset. Used to check whether
+    the dataloader works as intended.
+
+    The category of the dataset does not matter.
+    """

     DATASET_URL = "ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz"
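The dataloader contract exercised by these tests can be summarised in a short sketch; `datamodule` stands for an already-instantiated MVTec datamodule as in the fixtures, and the key names follow the test descriptions rather than a confirmed API:

>>> val_batch = next(iter(datamodule.val_dataloader()))
>>> val_batch["image"].shape[0]  # batch size of 1, as asserted in the tests
1
>>> {"image", "mask", "label"} <= set(val_batch.keys())  # assumed key names
True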
- """ + """Test Denormalize denormalizes tensor into [0, 256] range.""" denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze()) assert denormalized_sample.min() >= 0 and denormalized_sample.max() <= 256 def test_denormalize_return_numpy(self, data_sample): - """ - Denormalize should return a numpy array - """ + """Denormalize should return a numpy array.""" denormalized_sample = Denormalize()(data_sample["image"].squeeze()) assert isinstance(denormalized_sample, np.ndarray) def test_denormalize_channel_order(self, data_sample): - """ - Denormalize should return a numpy array of order [HxWxC] - """ + """Denormalize should return a numpy array of order [HxWxC]""" denormalized_sample = Denormalize().__call__(data_sample["image"].squeeze()) assert len(denormalized_sample.shape) == 3 and denormalized_sample.shape[-1] == 3 def test_representation(self): - """ - Test Denormalize representation should return string Denormalize() - """ + """Test Denormalize representation should return string + Denormalize()""" assert str(Denormalize()) == "Denormalize()" class TestToNumpy: - """ - Test ToNumpy whether it properly converts tensor into numpy array. - """ + """Test ToNumpy whether it properly converts tensor into numpy array.""" def test_to_numpy_image_pixel_values(self, data_sample): - """ - Test ToNumpy should return an array whose pixels in the range of [0, 256] - """ + """Test ToNumpy should return an array whose pixels in the range of [0, + 256]""" array = ToNumpy()(data_sample["image"]) assert array.min() >= 0 and array.max() <= 256 def test_to_numpy_converts_tensor_to_np_array(self, data_sample): - """ - ToNumpy returns a numpy array - """ + """ToNumpy returns a numpy array.""" array = ToNumpy()(data_sample["image"]) assert isinstance(array, np.ndarray) def test_to_numpy_channel_order(self, data_sample): - """ - ToNumpy() should return a numpy array of order [HxWxC] - """ + """ToNumpy() should return a numpy array of order [HxWxC]""" array = ToNumpy()(data_sample["image"]) assert len(array.shape) == 3 and array.shape[-1] == 3 def test_one_channel_images(self, data_sample): - """ - One channel tensor should be converted to HxW np array - """ + """One channel tensor should be converted to HxW np array.""" data = data_sample["image"][:, 0, :, :].unsqueeze(0) array = ToNumpy()(data) assert len(array.shape) == 2 def test_representation(self): - """ - Test ToNumpy() representation should return string `ToNumpy()` - """ + """Test ToNumpy() representation should return string `ToNumpy()`""" assert str(ToNumpy()) == "ToNumpy()" diff --git a/tests/datasets/test_tiler.py b/tests/datasets/test_tiler.py index da6f6f02b5..1448d8ee95 100644 --- a/tests/datasets/test_tiler.py +++ b/tests/datasets/test_tiler.py @@ -1,6 +1,4 @@ -""" -Image Tiling Tests. -""" +"""Image Tiling Tests.""" import pytest import torch @@ -43,9 +41,7 @@ [(512, 256), ([512, 512], [256, 256]), (ListConfig([512, 512]), 256)], ) def test_size_types_should_be_int_tuple_or_list_config(tile_size, stride): - """ - Size type could only be integer, tuple or ListConfig type. 
- """ + """Size type could only be integer, tuple or ListConfig type.""" tiler = Tiler(tile_size=tile_size, stride=stride) assert isinstance(tiler.tile_size_h, int) assert isinstance(tiler.stride_w, int) @@ -53,9 +49,7 @@ def test_size_types_should_be_int_tuple_or_list_config(tile_size, stride): @pytest.mark.parametrize("image_size, tile_size, stride, shape, use_random_tiling", tile_data) def test_tiler_handles_single_image_without_batch_dimension(image_size, tile_size, stride, shape, use_random_tiling): - """ - Tiler should add batch dimension if image is 3D (CxHxW). - """ + """Tiler should add batch dimension if image is 3D (CxHxW).""" tiler = Tiler(tile_size=tile_size, stride=stride) image = torch.rand(image_size) patches = tiler.tile(image, use_random_tiling=use_random_tiling) @@ -63,9 +57,7 @@ def test_tiler_handles_single_image_without_batch_dimension(image_size, tile_siz def test_stride_size_cannot_be_larger_than_tile_size(): - """ - Larger stride size than tile size is not desired, and causes issues. - """ + """Larger stride size than tile size is not desired, and causes issues.""" kernel_size = (128, 128) stride = 256 with pytest.raises(StrideSizeError): @@ -73,9 +65,7 @@ def test_stride_size_cannot_be_larger_than_tile_size(): def test_tile_size_cannot_be_larger_than_image_size(): - """ - Larger tile size than image size is not desired, and causes issues. - """ + """Larger tile size than image size is not desired, and causes issues.""" with pytest.raises(ValueError): tiler = Tiler(tile_size=1024, stride=512) image = torch.rand(1, 3, 512, 512) @@ -84,9 +74,7 @@ def test_tile_size_cannot_be_larger_than_image_size(): @pytest.mark.parametrize("tile_size, kernel_size, stride, image_size", untile_data) def test_untile_non_overlapping_patches(tile_size, kernel_size, stride, image_size): - """ - Non-Overlapping Tiling/Untiling should return the same image size. - """ + """Non-Overlapping Tiling/Untiling should return the same image size.""" tiler = Tiler(tile_size=kernel_size, stride=stride) image = torch.rand(image_size) tiles = tiler.tile(image) @@ -104,9 +92,7 @@ def test_upscale_downscale_mode(mode): @pytest.mark.parametrize("image_size, kernel_size, stride, tile_size, mode", overlapping_data) @pytest.mark.parametrize("remove_border_count", [0, 5]) def test_untile_overlapping_patches(image_size, kernel_size, stride, remove_border_count, tile_size, mode): - """ - Overlapping Tiling/Untiling should return the same image size. - """ + """Overlapping Tiling/Untiling should return the same image size.""" tiler = Tiler( tile_size=kernel_size, stride=stride, @@ -137,11 +123,8 @@ def test_untile_overlapping_patches(image_size, kernel_size, stride, remove_bord @pytest.mark.parametrize("stride", [(64, 64), (111, 111), (128, 111), (128, 128)]) @pytest.mark.parametrize("mode", ["padding", "interpolation"]) def test_divisible_tile_size_and_stride(image_size, tile_size, stride, mode): - """ - When the image is not divisible by tile size and stride, - Tiler should up samples the image before tiling, and downscales - before untiling. 
- """ + """When the image is not divisible by tile size and stride, Tiler should up + samples the image before tiling, and downscales before untiling.""" tiler = Tiler(tile_size, stride, mode=mode) image = torch.rand(image_size) tiles = tiler.tile(image) diff --git a/tests/datasets/test_transforms.py b/tests/datasets/test_transforms.py index 68f9ea2e8c..76e7e8157f 100644 --- a/tests/datasets/test_transforms.py +++ b/tests/datasets/test_transforms.py @@ -1,5 +1,4 @@ -""" -Data transformation test +"""Data transformation test. This test contains the following test: - Transformations could be ``None``, ``yaml``, ``json`` or ``dict``. @@ -18,20 +17,16 @@ def test_transforms_and_image_size_cannot_be_none(): - """ - When transformations ``config`` and ``image_size`` are ``None`` - ``PreProcessor`` class should raise a ``ValueError``. - """ + """When transformations ``config`` and ``image_size`` are ``None`` + ``PreProcessor`` class should raise a ``ValueError``.""" with pytest.raises(ValueError): PreProcessor(config=None, image_size=None) def test_image_size_could_be_int_or_tuple(): - """ - When ``config`` is None, ``image_size`` could be - either ``int`` or ``Tuple[int, int]``. - """ + """When ``config`` is None, ``image_size`` could be either ``int`` or + ``Tuple[int, int]``.""" PreProcessor(config=None, image_size=256) PreProcessor(config=None, image_size=(256, 512)) @@ -40,10 +35,9 @@ def test_image_size_could_be_int_or_tuple(): def test_load_transforms_from_string(): - """ - When the pre-processor is instantiated via a transform config file, - it should work with either string or A.Compose and return a ValueError otherwise. - """ + """When the pre-processor is instantiated via a transform config file, it + should work with either string or A.Compose and return a ValueError + otherwise.""" # Create a dummy transformation. transforms = A.Compose( @@ -69,10 +63,8 @@ def test_load_transforms_from_string(): def test_to_tensor_returns_correct_type(): - """ - `to_tensor` flag should ensure that pre-processor returns the expected - type. - """ + """`to_tensor` flag should ensure that pre-processor returns the expected + type.""" image = skimage.data.astronaut() pre_processor = PreProcessor(config=None, image_size=256, to_tensor=True) diff --git a/tests/helpers/dataset.py b/tests/helpers/dataset.py index 920b397c28..ce181a4b6b 100644 --- a/tests/helpers/dataset.py +++ b/tests/helpers/dataset.py @@ -13,11 +13,12 @@ def get_dataset_path(path: Union[str, Path] = "./datasets/MVTec"): - """ - Selects path based on tests in local system or docker image. - Local install assumes dataset is downloaded to anomaly/datasets/MVTec. - In either case, if the location is empty, the dataset is downloaded again. - This speeds up tests in docker images where dataset is already stored in /tmp/anomalib + """Selects path based on tests in local system or docker image. + + Local install assumes dataset is downloaded to + anomaly/datasets/MVTec. In either case, if the location is empty, + the dataset is downloaded again. 
diff --git a/tests/helpers/dataset.py b/tests/helpers/dataset.py
index 920b397c28..ce181a4b6b 100644
--- a/tests/helpers/dataset.py
+++ b/tests/helpers/dataset.py
@@ -13,11 +13,12 @@

 def get_dataset_path(path: Union[str, Path] = "./datasets/MVTec"):
-    """
-    Selects path based on tests in local system or docker image.
-    Local install assumes dataset is downloaded to anomaly/datasets/MVTec.
-    In either case, if the location is empty, the dataset is downloaded again.
-    This speeds up tests in docker images where dataset is already stored in /tmp/anomalib
+    """Selects path based on tests in local system or docker image.
+
+    Local install assumes dataset is downloaded to
+    anomaly/datasets/MVTec. In either case, if the location is empty,
+    the dataset is downloaded again. This speeds up tests in docker
+    images where the dataset is already stored in /tmp/anomalib.
     """
     # when running locally
     path = str(path)
@@ -172,7 +173,8 @@ def __init__(
         self.max_size = max_size

     def _generate_dataset(self):
-        """Generates dummy dataset in a temporary directory using the same convention as MVTec"""
+        """Generates dummy dataset in a temporary directory using the same
+        convention as MVTec."""
         # create train images
         train_path = os.path.join(self.root_dir, "shapes", "train", "good")
         os.makedirs(train_path, exist_ok=True)
@@ -224,10 +226,10 @@
         imsave(os.path.join(test_good, f"{i:03}.png"), image, check_contrast=False)

     def __enter__(self):
-        """Creates the dataset in temp folder"""
+        """Creates the dataset in temp folder."""
         self._generate_dataset()
         return self.root_dir

     def __exit__(self, _exc_type, _exc_value, _exc_traceback):
-        """Cleanup the directory"""
+        """Cleanup the directory."""
         shutil.rmtree(self.root_dir)
diff --git a/tests/helpers/detection.py b/tests/helpers/detection.py
index a8976290c1..3f9af882f7 100644
--- a/tests/helpers/detection.py
+++ b/tests/helpers/detection.py
@@ -1,4 +1,4 @@
-"""Helpers for detection tests"""
+"""Helpers for detection tests."""
 import os
 import xml.etree.cElementTree as ET
 from glob import glob
@@ -9,8 +9,9 @@

 class BBFromMasks:
-    """Creates temporary XML files from masks for testing. Intended to be used as a context so that
-    the XML files are automatically deleted when the execution goes out of scope
+    """Creates temporary XML files from masks for testing. Intended to be used
+    as a context manager so that the XML files are automatically deleted when
+    the execution goes out of scope.

     Example:

@@ -28,7 +29,7 @@ def __init__(self, root: str = "datasets/MVTec", dataset_name: str = "MVTec") ->
         self.generated_xml_files: List[str] = []

     def __enter__(self):
-        """Generate XML files"""
+        """Generate XML files."""
         for mask_path in glob(os.path.join(self.root, "*/ground_truth/*/*_mask.png")):
             path_tree = mask_path.split("/")
             image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
@@ -50,14 +51,14 @@ def __enter__(self):
         self.generated_xml_files.append(output_loc)

     def __exit__(self, _exc_type, _exc_value, _exc_traceback):
-        """Cleans up generated XML files"""
+        """Cleans up generated XML files."""
         for file in self.generated_xml_files:
             os.remove(file)

     def _create_xml_contents(
         self, boxes: List[List[List[np.int]]], path_tree: List[str], image_size: Tuple[int, int]
     ) -> ET.Element:
-        """Create the contents of the annotation file in Pascal VOC format
+        """Create the contents of the annotation file in Pascal VOC format.

         Args:
             boxes (List[List[List[np.int]]]): The calculated pox corners from the masks
diff --git a/tests/helpers/shapes.py b/tests/helpers/shapes.py
index d3372b78de..b013fa1273 100644
--- a/tests/helpers/shapes.py
+++ b/tests/helpers/shapes.py
@@ -5,7 +5,7 @@

 def random_square_patch(input_region: List[int], min_width: int = 10) -> List[int]:
-    """Gets a random patch in the input region
+    """Gets a random patch in the input region.

     Args:
         input_region (List[int]): Coordinates of the input region. [x1, y1, x2, y2]
@@ -47,7 +47,7 @@ def random_square_patch(input_region: List[int], min_width: int = 10) -> List[in

 def triangle(input_region: List[int]) -> Tuple[List[int], List[int]]:
-    """Get coordinates of points inside a triangle
+    """Get coordinates of points inside a triangle.

     Args:
         input_region (List[int]): Region in which to draw the triangle.
[x1, y1, x2, y2] @@ -70,7 +70,8 @@ def triangle(input_region: List[int]) -> Tuple[List[int], List[int]]: def rectangle(input_region: List[int], min_side: int = 10) -> Tuple[List[int], List[int]]: - """Get coordinates of corners of a rectangle. Only vertical rectangles are generated. + """Get coordinates of corners of a rectangle. Only vertical rectangles are + generated. Args: input_region (List[int]): Region in which to draw the rectangle. [x1, y1, x2, y2] @@ -103,7 +104,7 @@ def rectangle(input_region: List[int], min_side: int = 10) -> Tuple[List[int], L def hexagon(input_region: List[int]) -> Tuple[List[int], List[int]]: - """Get coordinates of points inside a hexagon + """Get coordinates of points inside a hexagon. Args: input_region (List[int]): Region in which to draw the hexagon. [x1, y1, x2, y2] @@ -132,7 +133,7 @@ def hexagon(input_region: List[int]) -> Tuple[List[int], List[int]]: def star(input_region: List[int]) -> Tuple[List[int], List[int]]: - """Get coordinates of points inside a star + """Get coordinates of points inside a star. Args: input_region (List[int]): Region in which to draw the star. [x1, y1, x2, y2] @@ -175,7 +176,7 @@ def star(input_region: List[int]) -> Tuple[List[int], List[int]]: def random_shapes( input_region: List[int], size: Tuple[int, int], max_shapes: int, shape: str = "rectangle" ) -> np.ndarray: - """Generate image with random shape + """Generate image with random shape. Args: input_region (List[int]): Coordinates of the input region. [x1, y1, x2, y2] diff --git a/tests/loggers/__init__.py b/tests/loggers/__init__.py index 0d14ca4904..23adccdc4b 100644 --- a/tests/loggers/__init__.py +++ b/tests/loggers/__init__.py @@ -1,6 +1,4 @@ -""" -Test supported loggers -""" +"""Test supported loggers.""" # Copyright (C) 2020 Intel Corporation # diff --git a/tests/loggers/test_get_logger.py b/tests/loggers/test_get_logger.py index af04a11a11..935b6f6573 100644 --- a/tests/loggers/test_get_logger.py +++ b/tests/loggers/test_get_logger.py @@ -1,6 +1,4 @@ -""" -Tests to ascertain requested logger -""" +"""Tests to ascertain requested logger.""" # Copyright (C) 2020 Intel Corporation # @@ -23,7 +21,7 @@ def test_get_logger(): - """Test whether the right logger is returned""" + """Test whether the right logger is returned.""" config = OmegaConf.create( { diff --git a/tests/models/__init__.py b/tests/models/__init__.py index 91a79fea22..28c3d19488 100644 --- a/tests/models/__init__.py +++ b/tests/models/__init__.py @@ -1,6 +1,4 @@ -""" -Test models -""" +"""Test models.""" # Copyright (C) 2020 Intel Corporation # diff --git a/tests/models/test_model.py b/tests/models/test_model.py index 9a8fa15d97..749cc12293 100644 --- a/tests/models/test_model.py +++ b/tests/models/test_model.py @@ -1,6 +1,4 @@ -""" -Test Models -""" +"""Test Models.""" # Copyright (C) 2020 Intel Corporation # @@ -35,8 +33,7 @@ @pytest.fixture(autouse=True) def category() -> str: - """ - PyTest fixture to randomly return an MVTec category. + """PyTest fixture to randomly return an MVTec category. Returns: str: Random MVTec category to train/test. 
@@ -64,9 +61,7 @@ def category() -> str:

 class AddDFMScores:
-    """
-    Function wrapper for checking both scores of DFM
-    """
+    """Function wrapper for checking both scores of DFM."""

     def __call__(self, func):
         @wraps(func)
@@ -81,7 +76,7 @@ def inner(*args, **kwds):

 class TestModel:
-    """Test model"""
+    """Test model."""

     def _setup(self, model_name, use_mvtec, dataset_path, project_path, nncf, category, score_type=None):
         config = get_configurable_parameters(model_name=model_name)
@@ -119,7 +114,7 @@ def _setup(self, model_name, use_mvtec, dataset_path, project_path, nncf, catego
         return model, config, datamodule, trainer

     def _test_metrics(self, trainer, config, model, datamodule):
-        """Tests the model metrics but also acts as a setup"""
+        """Tests the model metrics but also acts as a setup."""

         trainer.test(model=model, datamodule=datamodule)

@@ -171,7 +166,7 @@ def _test_model_load(self, config, datamodule, model):
     @TestDataset(num_train=200, num_test=10, path=get_dataset_path(), use_mvtec=True)
     @AddDFMScores()
     def test_model(self, category, model_name, nncf, use_mvtec=True, path="./datasets/MVTec", score_type=None):
-        """Driver for all the tests in the class"""
+        """Driver for all the tests in the class."""
         with tempfile.TemporaryDirectory() as project_path:
             model, config, datamodule, trainer = self._setup(
                 model_name=model_name,
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
index e5b9be6e5e..f47e58091f 100644
--- a/tests/utils/__init__.py
+++ b/tests/utils/__init__.py
@@ -1,6 +1,4 @@
-"""
-Tests for utils
-"""
+"""Tests for utils."""

 # Copyright (C) 2020 Intel Corporation
 #
diff --git a/tests/utils/test_download_progress_bar.py b/tests/utils/test_download_progress_bar.py
index 8a0904717c..fa0e1e637f 100644
--- a/tests/utils/test_download_progress_bar.py
+++ b/tests/utils/test_download_progress_bar.py
@@ -1,6 +1,4 @@
-"""
-Tests whether progress bar is visible in the UI
-"""
+"""Tests whether progress bar is visible in the UI."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -24,7 +22,7 @@

 def test_output_on_download(capfd):
-    """Test whether progress bar is shown"""
+    """Test whether progress bar is shown."""
     url = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/SIPI_Jelly_Beans_4.1.07.tiff/lossy-page1-256px-SIPI_Jelly_Beans_4.1.07.tiff.jpg"
     with tempfile.TemporaryDirectory() as dir_loc:
         destination = os.path.join(dir_loc, "jelly.jpg")
diff --git a/tools/inference.py b/tools/inference.py
index 6d6349ec92..9624a0aee1 100644
--- a/tools/inference.py
+++ b/tools/inference.py
@@ -1,7 +1,7 @@
-"""
-Anomalib Inferencer Script.
-This script performs inference by reading a model config
-file from command line, and show the visualization results.
+"""Anomalib Inferencer Script.
+
+This script performs inference by reading a model config file from
+command line, and shows the visualization results.
 """

 # Copyright (C) 2020 Intel Corporation
@@ -32,8 +32,7 @@

 def get_args() -> Namespace:
-    """
-    Get command line arguments.
+    """Get command line arguments.

     Returns:
         Namespace: List of arguments.
@@ -48,9 +47,7 @@ def get_args() -> Namespace:

 def infer() -> None:
-    """
-    Perform inference on an input image.
-    """
+    """Perform inference on an input image."""

     # Get the command line arguments, and config from the config.yaml file.
     # This config file is also used for training and contains all the relevant
diff --git a/tools/test.py b/tools/test.py
index 2c757ca344..dde026a0b1 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -1,8 +1,4 @@
-"""
-Test
-This script performs inference on the test dataset and saves the output
- visualizations into a directory.
-"""
+"""Test. This script performs inference on the test dataset and saves the output visualizations into a directory."""

 # Copyright (C) 2020 Intel Corporation
 #
@@ -29,8 +25,7 @@

 def get_args() -> Namespace:
-    """
-    get_args [summary]
+    """Get CLI arguments.

     Returns:
         Namespace: CLI arguments.
@@ -45,7 +40,9 @@ def get_args() -> Namespace:

 def test():
-    """
-    Test an anomaly classification and segmentation model that is initially trained via `tools/train.py`.
-    The script is able to write the results into both filesystem and a logger such as Tensorboard.
+    """Test an anomaly classification and segmentation model that is initially trained via `tools/train.py`.
+
+    The script is able to write the results into both filesystem and a logger such as Tensorboard.
     """
     args = get_args()
     config = get_configurable_parameters(
diff --git a/tools/train.py b/tools/train.py
index c2c8d130a1..d4e609d6f4 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -1,8 +1,8 @@
-"""
-Anomalib Traning Script.
-This script reads the name of the model or config file
-from command line, train/test the anomaly model to get
-quantitative and qualitative results.
+"""Anomalib Training Script.
+
+This script reads the name of the model or config file from command
+line, and trains/tests the anomaly model to get quantitative and
+qualitative results.
 """

 # Copyright (C) 2020 Intel Corporation
@@ -31,8 +31,7 @@

 def get_args() -> Namespace:
-    """
-    Get command line arguments.
+    """Get command line arguments.

     Returns:
         Namespace: List of arguments.
@@ -45,9 +44,7 @@ def get_args() -> Namespace:

 def train():
-    """
-    Train an anomaly classification or segmentation model based on a provided configuration file.
- """ + """Train an anomaly classification or segmentation model based on a provided configuration file.""" args = get_args() config = get_configurable_parameters(model_name=args.model, model_config_path=args.model_config_path) diff --git a/tox.ini b/tox.ini index a4c0f26528..17fa0b1ddd 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,7 @@ envlist = flake8 pylint mypy + pydocstyle coverage [testenv:black] @@ -50,6 +51,12 @@ commands = python -m mypy --install-types --non-interactive tools/test.py --config-file tox.ini +[testenv:pydocstyle] +basepython = python3.8 +deps = + pydocstyle +commands = pydocstyle anomalib --config=tox.ini + [testenv:coverage] basepython = python3.8 passenv=ftp_proxy @@ -73,13 +80,6 @@ max-line-length=120 ignore=E203,W503 [pylint] -; E0401: Unable to import (import-error) -; R0902: Too many instance attributes -; R0903: Too few public methods -; R0912: Too many branches -; R0913: Too many arguments -; R1721: Unnecessary use of a comprehension -; C0103: Argument name doesn't conform to snake_case naming style (invalid-name) extension-pkg-whitelist=cv2 ignored-modules=cv2 disable=duplicate-code, @@ -116,3 +116,15 @@ exclude_lines = except ApiException raise ApiException raise ValueError + +[pydocstyle] +inherit=false +ignore = D107, ; Missing docstring in __init__ + D202, ; No blank lines allowed after function docstring + D203, ; 1 blank line required before class docstring + D213, ; Multi-line docstring summary should start at the second line + D401, ; First line should be in imperative mood; try rephrasing + D404, ; First word of the docstring should not be This + D406, ; Section name should end with a newline + D407, ; Missing dashed underline after section + D413 ; Missing blank line after last section