Bump up the pytorch lightning to master branch due to vulnerability issues #55

Merged
merged 35 commits on Feb 7, 2022
Changes from 34 commits

Commits (35)
56c6014
Bump up the pytorch lightning to master branch due to vulnerability i…
samet-akcay Dec 30, 2021
7612c28
Merge branch 'development' of github.com:openvinotoolkit/anomalib int…
samet-akcay Jan 6, 2022
1fef827
Updated config files
samet-akcay Jan 7, 2022
ec10d61
Added trainer.validate method to tools/train.py
samet-akcay Jan 7, 2022
3927ee5
address mypy issues
samet-akcay Jan 7, 2022
2eebe2d
added Inference Dataset
samet-akcay Jan 7, 2022
8634dcc
Cleanup and polish
samet-akcay Jan 7, 2022
a699de9
fix deterministic issue
samet-akcay Jan 7, 2022
9e02c2a
isort
samet-akcay Jan 7, 2022
dcf8748
Merge branch 'development' of github.com:openvinotoolkit/anomalib int…
samet-akcay Jan 10, 2022
8db78d1
Modified config files
samet-akcay Jan 14, 2022
54b6b62
Resolved conflicts
samet-akcay Jan 25, 2022
5db96c3
pl version to 1.5.9
samet-akcay Jan 25, 2022
1621098
modified cflow configs
samet-akcay Jan 25, 2022
5b4abfc
set deterministic off
samet-akcay Jan 27, 2022
37b43a0
do not log images when testing.
samet-akcay Jan 27, 2022
e8a12b0
Bumped up version
samet-akcay Jan 27, 2022
06e6655
Update tox.yml
samet-akcay Jan 27, 2022
fe8a8a8
Removed export cublas
samet-akcay Jan 27, 2022
2042687
removed abc class from mvtec. predict-dataloader implemented
samet-akcay Jan 27, 2022
aa40d04
Add trainer.validate to `run_train_test` in normalization tests
samet-akcay Jan 27, 2022
d0eb02b
Merge branch 'fix/sa/update-pytorch-lightning-to-master' of github.co…
samet-akcay Jan 27, 2022
a3e8daa
revert numpy version
samet-akcay Jan 27, 2022
2ad9905
Fixed padim training
samet-akcay Jan 28, 2022
53cc1f1
Patchcore is now supported as well
samet-akcay Jan 28, 2022
4476719
DFKDE is now supported as well
samet-akcay Jan 28, 2022
1cdfcd9
DFM is now supported as well
samet-akcay Jan 28, 2022
78d35db
STFPM support and modify tests
samet-akcay Jan 28, 2022
271a96f
revert numpy version
samet-akcay Jan 28, 2022
7135e1c
revert some tests
samet-akcay Jan 28, 2022
ee2d37a
nncf callback
samet-akcay Jan 29, 2022
623f4ed
use on_validation_epoch_start instead of on_validation_start in cdf c…
djdameln Feb 2, 2022
df2914b
Merge branch 'development' of github.com:openvinotoolkit/anomalib int…
samet-akcay Feb 2, 2022
62f1abf
Updated ganomaly configs to match the version
samet-akcay Feb 2, 2022
50100f7
Remove commented lines.
samet-akcay Feb 7, 2022
6 changes: 2 additions & 4 deletions anomalib/core/callbacks/cdf_normalization.py
@@ -22,10 +22,8 @@ def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) ->
        pl_module.image_metrics.F1.threshold = 0.5
        pl_module.pixel_metrics.F1.threshold = 0.5

    def on_train_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, _unused: Optional[Any] = None
    ) -> None:
        """Called when the train epoch ends.
    def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when the validation starts after training.

        Use the current model to compute the anomaly score distributions
        of the normal training data. This is needed after every epoch, because the statistics must be
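For orientation, a minimal sketch of the hook this change moves to: Lightning fires on_validation_epoch_start once per epoch, after the train loop and before the first validation batch, so statistics computed there always reflect the current weights. The callback below is a hypothetical illustration, not the library's class, and it assumes an attached datamodule whose batches carry an "image" key.

import pytorch_lightning as pl
import torch


class ScoreStatsCallback(pl.Callback):
    """Hypothetical sketch: refresh score statistics from the current weights each epoch."""

    def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        pl_module.eval()
        scores = []
        with torch.no_grad():
            for batch in trainer.datamodule.train_dataloader():  # assumed attached datamodule
                scores.append(pl_module(batch["image"].to(pl_module.device)).flatten())
        # Mean/std of normal-data scores, later used to normalize validation scores.
        pl_module.score_mean = torch.cat(scores).mean()
        pl_module.score_std = torch.cat(scores).std()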
22 changes: 11 additions & 11 deletions anomalib/core/callbacks/nncf_callback.py
@@ -13,8 +13,6 @@
from pytorch_lightning import Callback
from torch.utils.data.dataloader import DataLoader

from anomalib.data import get_datamodule


def criterion_fn(outputs, criterion):
"""Calls the criterion function on outputs."""
@@ -76,21 +74,18 @@ def __init__(self, config: Union[ListConfig, DictConfig], dirpath: str, filename
        self.dirpath = dirpath
        self.filename = filename

        # we need to create a datamodule here to obtain the init loader
        datamodule = get_datamodule(config)
        datamodule.setup()
        self.train_loader = datamodule.train_dataloader()

        self.comp_ctrl: Optional[CompressionAlgorithmController] = None
        self.compression_scheduler: CompressionScheduler

    def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str] = None) -> None:
    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None) -> None:
        """Call when fit or test begins.

        Takes the pytorch model and wraps it using the compression controller so that it is ready for nncf fine-tuning.
        """
        if self.comp_ctrl is None:
            init_loader = InitLoader(self.train_loader)
            # NOTE: trainer.datamodule returns the following error
            # "Trainer" has no attribute "datamodule" [attr-defined]
            init_loader = InitLoader(trainer.datamodule.train_dataloader())  # type: ignore
            nncf_config = register_default_init_args(
                self.nncf_config, init_loader, pl_module.model.loss, criterion_fn=criterion_fn
            )
@@ -99,7 +94,12 @@ def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str]
            self.compression_scheduler = self.comp_ctrl.scheduler

    def on_train_batch_start(
        self, trainer, _pl_module: pl.LightningModule, _batch: Any, _batch_idx: int, _dataloader_idx: int
        self,
        trainer: pl.Trainer,
        _pl_module: pl.LightningModule,
        _batch: Any,
        _batch_idx: int,
        _unused: Optional[int] = 0,
    ) -> None:
        """Call when the train batch begins.

@@ -109,7 +109,7 @@ def on_train_batch_start(
        if self.comp_ctrl is not None:
            trainer.model.loss_val = self.comp_ctrl.loss()

    def on_train_end(self, _trainer, _pl_module: pl.LightningModule) -> None:
    def on_train_end(self, _trainer: pl.Trainer, _pl_module: pl.LightningModule) -> None:
        """Call when the train ends.

        Exports onnx model and if compression controller is not None, uses the onnx model to generate the OpenVINO IR.
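The substantive change above is where the init loader comes from: rather than building a second datamodule in __init__, the callback now reads the one already attached to the trainer at setup() time. A minimal sketch of that pattern, with illustrative names:

from typing import Optional

import pytorch_lightning as pl


class InitFromTrainerCallback(pl.Callback):
    """Hypothetical sketch: resolve the train loader lazily in setup()."""

    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None) -> None:
        # mypy does not know Trainer exposes `datamodule`, hence the ignore in the
        # diff above; at runtime the attribute is present when a datamodule is used.
        self.train_loader = trainer.datamodule.train_dataloader()  # type: ignore[attr-defined]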
9 changes: 5 additions & 4 deletions anomalib/core/model/anomaly_module.py
@@ -14,7 +14,8 @@
# See the License for the specific language governing permissions
# and limitations under the License.

from typing import List, Union
from abc import ABC
from typing import Any, List, Optional, Union

import pytorch_lightning as pl
from omegaconf import DictConfig, ListConfig
@@ -30,7 +31,7 @@
)


class AnomalyModule(pl.LightningModule):
class AnomalyModule(pl.LightningModule, ABC):
    """AnomalyModule to train, validate, predict and test images.

    Acts as a base class for all the Anomaly Modules in the library.
@@ -77,7 +78,7 @@ def validation_step(self, batch, batch_idx) -> dict:  # type: ignore  # pylint:
"""To be implemented in the subclasses."""
raise NotImplementedError

def predict_step(self, batch, batch_idx, _): # pylint: disable=arguments-differ, signature-differs
def predict_step(self, batch: Any, batch_idx: int, _dataloader_idx: Optional[int] = None) -> Any:
"""Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.

By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`.
@@ -86,7 +87,7 @@ def predict_step(self, batch, batch_idx, _):  # pylint: disable=arguments-differ
        Args:
            batch (Tensor): Current batch
            batch_idx (int): Index of current batch
            dataloader_idx (int): Index of the current dataloader
            _dataloader_idx (int): Index of the current dataloader

        Return:
            Predicted output
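With AnomalyModule now an ABC, a concrete model only needs to supply validation_step; predict_step is inherited and falls back to forward(). A hedged subclass sketch (the self.model attribute and the batch keys are assumptions, not the library's contract):

from anomalib.core.model.anomaly_module import AnomalyModule


class MyAnomalyModel(AnomalyModule):
    """Hypothetical subclass: only the abstract pieces need overriding."""

    def validation_step(self, batch, batch_idx) -> dict:  # type: ignore  # pylint: disable=arguments-differ
        # Assumes self.model maps the "image" tensor to anomaly maps.
        batch["anomaly_maps"] = self.model(batch["image"])
        return batch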
4 changes: 4 additions & 0 deletions anomalib/data/__init__.py
@@ -19,6 +19,7 @@
from omegaconf import DictConfig, ListConfig
from pytorch_lightning import LightningDataModule

from .inference import InferenceDataset
from .mvtec import MVTecDataModule


@@ -48,3 +49,6 @@ def get_datamodule(config: Union[DictConfig, ListConfig]) -> LightningDataModule
        raise ValueError("Unknown dataset!")

    return datamodule


__all__ = ["get_datamodule", "InferenceDataset"]
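A usage sketch for the exports above, assuming a model config whose dataset section uses the mvtec format (the cflow config edited later in this PR is one example):

from omegaconf import OmegaConf

from anomalib.data import get_datamodule

config = OmegaConf.load("anomalib/models/cflow/config.yaml")
datamodule = get_datamodule(config)
datamodule.setup()
train_loader = datamodule.train_dataloader()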
67 changes: 67 additions & 0 deletions anomalib/data/inference.py
@@ -0,0 +1,67 @@
"""Inference Dataset."""

# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.

from pathlib import Path
from typing import Any, Optional, Tuple, Union

import albumentations as A
from torch.utils.data.dataset import Dataset

from anomalib.data.transforms import PreProcessor
from anomalib.data.utils import get_image_filenames, read_image


class InferenceDataset(Dataset):
    """Inference Dataset to perform prediction."""

    def __init__(
        self,
        path: Union[str, Path],
        pre_process: Optional[PreProcessor] = None,
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        transform_config: Optional[Union[str, A.Compose]] = None,
    ) -> None:
        """Inference Dataset to perform prediction.

        Args:
            path (Union[str, Path]): Path to an image or image-folder.
            pre_process (Optional[PreProcessor], optional): Pre-Processing transforms to
                pre-process the input dataset. Defaults to None.
            image_size (Optional[Union[int, Tuple[int, int]]], optional): Target image size
                to resize the original image. Defaults to None.
            transform_config (Optional[Union[str, A.Compose]], optional): Configuration file
                to parse the albumentations transforms. Defaults to None.
        """
        super().__init__()

        self.image_filenames = get_image_filenames(path)

        if pre_process is None:
            self.pre_process = PreProcessor(transform_config, image_size)
        else:
            self.pre_process = pre_process

    def __len__(self) -> int:
        """Get the number of images in the given path."""
        return len(self.image_filenames)

    def __getitem__(self, index: int) -> Any:
        """Get the image based on the `index`."""
        image_filename = self.image_filenames[index]
        image = read_image(path=image_filename)
        pre_processed = self.pre_process(image=image)

        return pre_processed
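A quick usage sketch for the new dataset (the folder path is illustrative); each item is the pre-processed PreProcessor output, so a plain DataLoader can batch it directly:

from torch.utils.data import DataLoader

from anomalib.data import InferenceDataset

dataset = InferenceDataset(path="./datasets/MVTec/bottle/test", image_size=256)
loader = DataLoader(dataset, batch_size=32, shuffle=False)
batch = next(iter(loader))  # ready to feed to a model or trainer.predict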
43 changes: 31 additions & 12 deletions anomalib/data/mvtec.py
@@ -34,11 +34,13 @@
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import VisionDataset

from anomalib.data.inference import InferenceDataset
from anomalib.data.transforms import PreProcessor
from anomalib.data.utils import read_image
from anomalib.utils.download_progress_bar import DownloadProgressBar
@@ -417,8 +419,10 @@ def __init__(
        self.root = root if isinstance(root, Path) else Path(root)
        self.category = category
        self.dataset_path = self.root / self.category
        self.transform_config = transform_config
        self.image_size = image_size

        self.pre_process = PreProcessor(config=transform_config, image_size=image_size)
        self.pre_process = PreProcessor(config=self.transform_config, image_size=self.image_size)

        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
@@ -431,13 +435,25 @@
        self.test_data: Dataset
        if create_validation_set:
            self.val_data: Dataset
        self.inference_data: Dataset

    def setup(self, stage: Optional[str] = None) -> None:
        """Setup train, validation and test data.

        Args:
            stage: Optional[str]: Train/Val/Test stages. (Default value = None)

        """
        if stage in (None, "fit"):
            self.train_data = MVTec(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process,
                split="train",
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )

        if self.create_validation_set:
            self.val_data = MVTec(
                root=self.root,
@@ -447,6 +463,7 @@ def setup(self, stage: Optional[str] = None) -> None:
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )

        self.test_data = MVTec(
            root=self.root,
            category=self.category,
@@ -455,25 +472,27 @@ def setup(self, stage: Optional[str] = None) -> None:
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )
        if stage in (None, "fit"):
            self.train_data = MVTec(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process,
                split="train",
                seed=self.seed,
                create_validation_set=self.create_validation_set,

        if stage == "predict":
            self.inference_data = InferenceDataset(
                path=self.root, image_size=self.image_size, transform_config=self.transform_config
            )

    def train_dataloader(self) -> DataLoader:
    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """Get train dataloader."""
        return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)

    def val_dataloader(self) -> DataLoader:
    def val_dataloader(self) -> EVAL_DATALOADERS:
        """Get validation dataloader."""
        dataset = self.val_data if self.create_validation_set else self.test_data
        return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)

    def test_dataloader(self) -> DataLoader:
    def test_dataloader(self) -> EVAL_DATALOADERS:
        """Get test dataloader."""
        return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)

    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """Get predict dataloader."""
        return DataLoader(
            self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers
        )
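With predict_dataloader() in place, the datamodule plugs straight into Lightning's predict loop. A hedged sketch, reusing the config from the earlier sketch and assuming an already-trained AnomalyModule instance named model:

import pytorch_lightning as pl

from anomalib.data import get_datamodule

datamodule = get_datamodule(config)  # config loaded as in the sketch above
trainer = pl.Trainer(gpus=1)
predictions = trainer.predict(model=model, datamodule=datamodule)  # drives predict_dataloader()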
30 changes: 29 additions & 1 deletion anomalib/data/utils.py
@@ -15,10 +15,38 @@
# and limitations under the License.

from pathlib import Path
from typing import Union
from typing import List, Union

import cv2
import numpy as np
from torchvision.datasets.folder import IMG_EXTENSIONS


def get_image_filenames(path: Union[str, Path]) -> List[str]:
    """Get image filenames.

    Args:
        path (Union[str, Path]): Path to image or image-folder.

    Returns:
        List[str]: List of image filenames

    """
    image_filenames: List[str]

    if isinstance(path, str):
        path = Path(path)

    if path.is_file() and path.suffix in IMG_EXTENSIONS:
        image_filenames = [str(path)]

    if path.is_dir():
        image_filenames = [str(p) for p in path.glob("**/*") if p.suffix in IMG_EXTENSIONS]

    if len(image_filenames) == 0:
        raise ValueError(f"Found 0 images in {path}")

    return image_filenames


def read_image(path: Union[str, Path]) -> np.ndarray:
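Usage of the new helper is straightforward (paths illustrative): it accepts either a single image or a directory, filters on torchvision's IMG_EXTENSIONS, and raises ValueError if nothing matches:

from anomalib.data.utils import get_image_filenames

single = get_image_filenames("./datasets/MVTec/bottle/test/broken_large/000.png")  # one-element list
folder = get_image_filenames("./datasets/MVTec/bottle/test")  # every image under the tree, recursively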
11 changes: 3 additions & 8 deletions anomalib/models/cflow/config.yaml
@@ -3,7 +3,7 @@ dataset:
  format: mvtec
  path: ./datasets/MVTec
  url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
  category: leather
  category: bottle
  task: segmentation
  label_format: None
  image_size: 256
@@ -47,18 +47,15 @@ trainer:
  accelerator: null
  accumulate_grad_batches: 1
  amp_backend: native
  amp_level: O2
  auto_lr_find: false
  auto_scale_batch_size: false
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1
  checkpoint_callback: true
  default_root_dir: null
  deterministic: true
  distributed_backend: null
  deterministic: false
  fast_dev_run: false
  flush_logs_every_n_steps: 100
  gpus: 1
  gradient_clip_val: 0
  limit_predict_batches: 1.0
@@ -68,7 +65,7 @@ trainer:
  log_every_n_steps: 50
  log_gpu_memory: null
  max_epochs: 50
  max_steps: null
  max_steps: -1
  min_epochs: null
  min_steps: null
  move_metrics_to_cpu: false
@@ -83,14 +80,12 @@ trainer:
  process_position: 0
  profiler: null
  progress_bar_refresh_rate: null
  reload_dataloaders_every_epoch: false
  replace_sampler_ddp: true
  stochastic_weight_avg: false
  sync_batchnorm: false
  terminate_on_nan: false
  tpu_cores: null
  track_grad_norm: -1
  truncated_bptt_steps: null
  val_check_interval: 1.0
  weights_save_path: null
  weights_summary: top
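The trainer block of these configs mirrors pytorch_lightning.Trainer keyword arguments one-to-one, which is why arguments dropped by newer Lightning releases (distributed_backend, truncated_bptt_steps, amp_level, and so on) had to be removed in this diff. A sketch of how such a config typically reaches the Trainer, assuming OmegaConf:

import pytorch_lightning as pl
from omegaconf import OmegaConf

config = OmegaConf.load("anomalib/models/cflow/config.yaml")
trainer = pl.Trainer(**config.trainer)  # e.g. deterministic=False, max_steps=-1 from this diff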