Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Perform metric computation on CPU #64

Merged
merged 3 commits into from
Jan 9, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions anomalib/core/callbacks/min_max_normalization.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,11 +73,11 @@ def on_predict_batch_end(
@staticmethod
def _normalize_batch(outputs, pl_module):
    """Normalize a batch of predictions on the CPU.

    The min-max statistics and the adaptive thresholds are moved to the
    CPU first so that normalization runs there regardless of the device
    the model trained on (the batch outputs are already on the CPU).

    Args:
        outputs: Step-output dict holding ``pred_scores`` and optionally
            ``anomaly_maps``; normalized values are written back in place.
        pl_module: Anomaly module exposing ``min_max``,
            ``image_threshold`` and ``pixel_threshold``.
    """
    stats = pl_module.min_max.cpu()
    outputs["pred_scores"] = normalize(
        outputs["pred_scores"], pl_module.image_threshold.value.cpu(), stats.min, stats.max
    )
    # Pixel-level anomaly maps are optional; normalize only when present.
    if "anomaly_maps" in outputs.keys():
        outputs["anomaly_maps"] = normalize(
            outputs["anomaly_maps"], pl_module.pixel_threshold.value.cpu(), stats.min, stats.max
        )
20 changes: 15 additions & 5 deletions anomalib/core/model/anomaly_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,19 +48,19 @@ def __init__(self, params: Union[DictConfig, ListConfig]):
self.loss: Tensor
self.callbacks: List[Callback]

self.image_threshold = AdaptiveThreshold(self.hparams.model.threshold.image_default)
self.image_threshold = AdaptiveThreshold(self.hparams.model.threshold.image_default).cpu()
self.pixel_threshold = AdaptiveThreshold(self.hparams.model.threshold.pixel_default)

self.training_distribution = AnomalyScoreDistribution()
self.min_max = MinMax()
self.training_distribution = AnomalyScoreDistribution().cpu()
self.min_max = MinMax().cpu()

self.model: nn.Module

# metrics
auroc = AUROC(num_classes=1, pos_label=1, compute_on_step=False)
f1_score = F1(num_classes=1, compute_on_step=False)
self.image_metrics = MetricCollection([auroc, f1_score], prefix="image_")
self.pixel_metrics = self.image_metrics.clone(prefix="pixel_")
self.image_metrics = MetricCollection([auroc, f1_score], prefix="image_").cpu()
self.pixel_metrics = self.image_metrics.clone(prefix="pixel_").cpu()

def forward(self, batch): # pylint: disable=arguments-differ
"""Forward-pass input tensor to the module.
Expand Down Expand Up @@ -111,11 +111,13 @@ def test_step(self, batch, _): # pylint: disable=arguments-differ

def validation_step_end(self, val_step_outputs):  # pylint: disable=arguments-differ
    """Called at the end of each validation step.

    Returns the (mutated) step outputs so Lightning can aggregate them.
    """
    # Move tensors to the CPU before post-processing so that subsequent
    # metric updates happen on the CPU rather than the training device.
    self._outputs_to_cpu(val_step_outputs)
    self._post_process(val_step_outputs)
    return val_step_outputs

def test_step_end(self, test_step_outputs):  # pylint: disable=arguments-differ
    """Called at the end of each test step.

    Returns the (mutated) step outputs so Lightning can aggregate them.
    """
    # Move tensors to the CPU before post-processing so that subsequent
    # metric updates happen on the CPU rather than the training device.
    self._outputs_to_cpu(test_step_outputs)
    self._post_process(test_step_outputs)
    return test_step_outputs

Expand Down Expand Up @@ -152,8 +154,10 @@ def _compute_adaptive_threshold(self, outputs):

def _collect_outputs(self, image_metric, pixel_metric, outputs):
    """Accumulate image- and pixel-level predictions into the metrics.

    Both metric objects are moved to the CPU once, up front, instead of
    on every loop iteration as before — ``.cpu()`` is idempotent and the
    batch outputs have already been moved to the CPU by
    ``_outputs_to_cpu``, so a single move suffices.

    Args:
        image_metric: Metric collection updated with image-level scores.
        pixel_metric: Metric collection updated with pixel-level maps.
        outputs: List of step-output dicts from validation/test steps.
    """
    image_metric.cpu()
    pixel_metric.cpu()
    for output in outputs:
        image_metric.update(output["pred_scores"], output["label"].int())
        # Pixel metrics need both the ground-truth mask and the predicted map.
        if "mask" in output.keys() and "anomaly_maps" in output.keys():
            pixel_metric.update(output["anomaly_maps"].flatten(), output["mask"].flatten().int())

def _post_process(self, outputs):
Expand All @@ -163,6 +167,12 @@ def _post_process(self, outputs):
outputs["anomaly_maps"].reshape(outputs["anomaly_maps"].shape[0], -1).max(dim=1).values
)

def _outputs_to_cpu(self, output):
    """Move every tensor in the step-output dict to the CPU, in place.

    Non-tensor values (labels, paths, etc.) are left untouched.
    Reassigning an existing key while iterating ``items()`` is safe
    because no keys are added or removed.
    """
    for key, value in output.items():
        if isinstance(value, Tensor):
            output[key] = value.cpu()

def _log_metrics(self):
"""Log computed performance metrics."""
self.log_dict(self.image_metrics)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,7 @@ def test_normalizer():

# performance should be the same
for metric in ["image_AUROC", "image_F1"]:
assert results_without_normalization[0][metric] == results_with_cdf_normalization[0][metric]
assert results_without_normalization[0][metric] == results_with_minmax_normalization[0][metric]
assert round(results_without_normalization[0][metric], 3) == round(results_with_cdf_normalization[0][metric], 3)
assert round(results_without_normalization[0][metric], 3) == round(
results_with_minmax_normalization[0][metric], 3
)