Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add min-max normalization #53

Merged
merged 15 commits into from
Jan 5, 2022
20 changes: 13 additions & 7 deletions anomalib/core/callbacks/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from pytorch_lightning.callbacks import Callback, ModelCheckpoint

from .compress import CompressModelCallback
from .min_max_normalization import MinMaxNormalizationCallback
from .model_loader import LoadModelCallback
from .normalization import AnomalyScoreNormalizationCallback
from .save_to_csv import SaveToCSVCallback
Expand Down Expand Up @@ -51,17 +52,22 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]:
load_model = LoadModelCallback(os.path.join(config.project.path, config.model.weight_file))
callbacks.append(load_model)

if "normalize_scores" in config.model.keys() and config.model.normalize_scores:
if config.model.name in ["padim", "stfpm"]:
if not config.optimization.nncf.apply:
callbacks.append(AnomalyScoreNormalizationCallback())
if "normalization_method" in config.model.keys() and not config.model.normalization_method == "none":
if config.model.normalization_method == "cdf":
if config.model.name in ["padim", "stfpm"]:
if not config.optimization.nncf.apply:
callbacks.append(AnomalyScoreNormalizationCallback())
djdameln marked this conversation as resolved.
Show resolved Hide resolved
else:
raise NotImplementedError("CDF Score Normalization is currently not compatible with NNCF.")
else:
raise NotImplementedError("Score Normalization is currently not compatible with NNCF.")
raise NotImplementedError("Score Normalization is currently supported for PADIM and STFPM only.")
elif config.model.normalization_method == "min_max":
callbacks.append(MinMaxNormalizationCallback())
else:
raise NotImplementedError("Score Normalization is currently supported for PADIM and STFPM only.")
raise ValueError(f"Normalization method not recognized: {config.model.normalization_method}")

if not config.project.log_images_to == []:
callbacks.append(VisualizerCallback(inputs_are_normalized=config.model.normalize_scores))
callbacks.append(VisualizerCallback(inputs_are_normalized=config.model.normalization_method == "cdf"))
djdameln marked this conversation as resolved.
Show resolved Hide resolved

if "optimization" in config.keys():
if config.optimization.nncf.apply:
Expand Down
65 changes: 65 additions & 0 deletions anomalib/core/callbacks/min_max_normalization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
"""Anomaly Score Normalization Callback that uses min-max normalization."""
djdameln marked this conversation as resolved.
Show resolved Hide resolved
from typing import Any, Dict

import pytorch_lightning as pl
import torch
from pytorch_lightning import Callback
from pytorch_lightning.utilities.types import STEP_OUTPUT


class MinMaxNormalizationCallback(Callback):
    """Callback that normalizes the image-level and pixel-level anomaly scores using min-max normalization.

    The minimum and maximum anomaly-map values are accumulated over the
    validation set and then used to rescale test/predict outputs — and the
    F1 metric thresholds — into the normalized domain.
    """

    def __init__(self):
        super().__init__()  # cooperate with the Lightning Callback base class
        # Running extrema of the anomaly maps observed during validation.
        self.min = float("inf")
        self.max = -float("inf")

    def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """Called when the test begins.

        Rescales the adaptive image/pixel thresholds into the normalized
        domain so the F1 metrics are evaluated against normalized scores.
        """
        pl_module.image_metrics.F1.threshold = self._normalize_value(pl_module.image_threshold.value.item())
        pl_module.pixel_metrics.F1.threshold = self._normalize_value(pl_module.pixel_threshold.value.item())

    def on_validation_batch_end(
        self,
        _trainer: pl.Trainer,
        _pl_module: pl.LightningModule,
        outputs: STEP_OUTPUT,
        _batch: Any,
        _batch_idx: int,
        _dataloader_idx: int,
    ) -> None:
        """Called when the validation batch ends, update the min and max observed values."""
        self.max = max(self.max, torch.max(outputs["anomaly_maps"]).item())
        self.min = min(self.min, torch.min(outputs["anomaly_maps"]).item())

    def on_test_batch_end(
        self,
        _trainer: pl.Trainer,
        _pl_module: pl.LightningModule,
        outputs: STEP_OUTPUT,
        _batch: Any,
        _batch_idx: int,
        _dataloader_idx: int,
    ) -> None:
        """Called when the test batch ends, normalizes the predicted scores and anomaly maps."""
        self._normalize(outputs)

    def on_predict_batch_end(
        self,
        _trainer: pl.Trainer,
        _pl_module: pl.LightningModule,
        outputs: Dict,
        _batch: Any,
        _batch_idx: int,
        _dataloader_idx: int,
    ) -> None:
        """Called when the predict batch ends, normalizes the predicted scores and anomaly maps."""
        self._normalize(outputs)

    def _normalize(self, outputs):
        """Min-max normalize the predicted scores and anomaly maps in ``outputs`` in place."""
        outputs["pred_scores"] = self._normalize_value(outputs["pred_scores"])
        outputs["anomaly_maps"] = self._normalize_value(outputs["anomaly_maps"])

    def _normalize_value(self, value):
        """Rescale ``value`` (scalar or tensor) by the observed min/max range.

        Raises:
            ValueError: If no valid range was observed during validation
                (``max <= min``), which would otherwise yield NaNs or a bare
                ``ZeroDivisionError``.
        """
        value_range = self.max - self.min
        # Guards both the degenerate max == min case and the case where the
        # validation hooks never ran (min/max still at their +/-inf defaults).
        if not value_range > 0:
            raise ValueError(
                "Cannot apply min-max normalization: no valid min/max statistics were collected during validation."
            )
        return (value - self.min) / value_range
2 changes: 1 addition & 1 deletion anomalib/core/callbacks/visualizer_callback.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ def on_test_batch_end(
threshold = 0.5
normalize = False # anomaly maps are already normalized
else:
threshold = pl_module.pixel_threshold.value.item()
threshold = pl_module.pixel_metrics.F1.threshold
normalize = True # raw anomaly maps. Still need to normalize

for (filename, image, true_mask, anomaly_map) in zip(
Expand Down
4 changes: 2 additions & 2 deletions anomalib/core/model/anomaly_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,8 +141,8 @@ def _compute_adaptive_threshold(self, outputs):
else:
self.pixel_threshold.value = self.image_threshold.value

self.image_metrics.F1.threshold = self.image_threshold.value
self.pixel_metrics.F1.threshold = self.pixel_threshold.value
self.image_metrics.F1.threshold = self.image_threshold.value.item()
self.pixel_metrics.F1.threshold = self.pixel_threshold.value.item()

def _collect_outputs(self, image_metric, pixel_metric, outputs):
for output in outputs:
Expand Down
2 changes: 1 addition & 1 deletion anomalib/models/dfkde/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ model:
confidence_threshold: 0.5
pre_processing: scale
n_components: 16
normalize_scores: false # currently not supported for this model
normalization_method: min_max # options: [null, min_max, cdf]
threshold:
image_default: 0
adaptive: true
Expand Down
2 changes: 1 addition & 1 deletion anomalib/models/dfm/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ model:
pca_level: 0.97
score_type: fre # nll: for Gaussian modeling, fre: pca feature reconstruction error
project_path: ./results
normalize_scores: false # currently not supported for this model
normalization_method: min_max # options: [null, min_max, cdf]
threshold:
image_default: 0
adaptive: true
Expand Down
6 changes: 3 additions & 3 deletions anomalib/models/padim/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ dataset:
format: mvtec
path: ./datasets/MVTec
url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
category: leather
category: bottle
djdameln marked this conversation as resolved.
Show resolved Hide resolved
task: segmentation
label_format: None
tiling:
Expand All @@ -26,7 +26,7 @@ model:
- layer2
- layer3
metric: auc
normalize_scores: true
normalization_method: none # options: [none, min_max, cdf]
threshold:
image_default: 3
pixel_default: 3
Expand All @@ -35,7 +35,7 @@ model:
project:
seed: 42
path: ./results
log_images_to: []
log_images_to: [local]
djdameln marked this conversation as resolved.
Show resolved Hide resolved
logger: false
save_to_csv: false

Expand Down
2 changes: 1 addition & 1 deletion anomalib/models/patchcore/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ model:
num_neighbors: 9
metric: auc
weight_file: weights/model.ckpt
normalize_scores: false # currently not supported for this model
normalization_method: min_max # options: [null, min_max, cdf]
threshold:
image_default: 0
pixel_default: 0
Expand Down
2 changes: 1 addition & 1 deletion anomalib/models/stfpm/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ model:
patience: 3
metric: pixel_AUROC
mode: max
normalize_scores: false
normalization_method: min_max # options: [null, min_max, cdf]
threshold:
image_default: 0
pixel_default: 0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,22 @@ def test_normalizer():
config.dataset.path = get_dataset_path(config.dataset.path)
config.model.threshold.adaptive = True

# run with normalization
config.model.normalize_scores = True
# run without normalization
config.model.normalization_method = "none"
seed_everything(42)
results_with_normalization = run_train_test(config)
results_without_normalization = run_train_test(config)

# run with cdf normalization
config.model.normalization_method = "cdf"
seed_everything(42)
results_with_cdf_normalization = run_train_test(config)

# run without normalization
config.model.normalize_scores = False
config.model.normalization_method = "min_max"
seed_everything(42)
results_without_normalization = run_train_test(config)
results_with_minmax_normalization = run_train_test(config)

# performance should be the same
for metric in ["image_AUROC", "image_F1"]:
assert results_without_normalization[0][metric] == results_with_normalization[0][metric]
assert results_without_normalization[0][metric] == results_with_cdf_normalization[0][metric]
assert results_without_normalization[0][metric] == results_with_minmax_normalization[0][metric]