Bump up pytorch-lightning version to 1.6.0 or higher (#193)
* bump up pytorch-lightning version

* bump up to 1.6.0 or higher

* 🛠 Fix typo.

* 🛠 Fix mypy issues.

* 🔄 Switch soft permutation to false by default, since it radically slows down training.

* 🗑 Remove `self.automatic_optimization=False` from dfkde to automatically set `trainer.global_step`

* 🗑 Remove `self.automatic_optimization=False` from dfm to automatically set `trainer.global_step`

* 🗑 Remove `self.automatic_optimization=False` from padim to automatically set `trainer.global_step`

* 🗑 Remove `self.automatic_optimization=False` from patchcore to automatically set `trainer.global_step`

* 🔄 replace `checkpoint_callback` with `enable_checkpointing`

* ✏️ Edit `check_val_every_n_epoch` from 2 ➡️ 1 to save the weights

* ✏️ Set `check_val_every_n_epoch` to 1 for `fast_run`.
samet-akcay committed Apr 8, 2022
1 parent 548852f · commit ce00b50
Showing 15 changed files with 12 additions and 14 deletions.
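
Four of the changed files drop `self.automatic_optimization = False` for the same reason given in the commit message: under pytorch-lightning>=1.6, leaving automatic optimization enabled lets the trainer keep `trainer.global_step` in sync on its own, even for models that never back-propagate. A minimal sketch of the resulting pattern, assuming a batch dict with an "image" key; the class name and body are illustrative, not the repository's code:

    from typing import List

    from pytorch_lightning import LightningModule
    from torch import Tensor


    class FeatureCollector(LightningModule):
        """Hypothetical feature-extraction model in the style of dfkde/dfm/padim/patchcore."""

        def __init__(self) -> None:
            super().__init__()
            # Previously: self.automatic_optimization = False
            self.embeddings: List[Tensor] = []

        def configure_optimizers(self) -> None:
            """These models fit their statistics outside the optimizer loop, so no optimizer."""
            return None

        def training_step(self, batch, _batch_idx) -> None:
            # Only collect embeddings; returning None skips the optimizer step for this batch.
            self.embeddings.append(batch["image"].flatten(start_dim=1))

Returning None from `training_step` simply skips optimization for that batch, which is exactly what these feature collectors need.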
2 changes: 1 addition & 1 deletion anomalib/models/cflow/config.yaml
@@ -54,9 +54,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
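
The same one-line swap recurs in the remaining config files below: the `checkpoint_callback` trainer flag is deprecated in this pytorch-lightning release line in favor of `enable_checkpointing`. For reference, a sketch of the equivalent change when constructing the Trainer directly:

    from pytorch_lightning import Trainer

    # Before (pytorch-lightning < 1.6): Trainer(checkpoint_callback=True)
    # After (pytorch-lightning >= 1.6): the renamed flag, same behavior.
    trainer = Trainer(enable_checkpointing=True)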
2 changes: 1 addition & 1 deletion anomalib/models/dfkde/config.yaml
@@ -41,9 +41,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1 # Don't validate before extracting features.
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
1 change: 0 additions & 1 deletion anomalib/models/dfkde/model.py
@@ -100,7 +100,6 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
      hparams.model.backbone, hparams.model.max_training_points, threshold_steepness, threshold_offset
  )

- self.automatic_optimization = False
  self.embeddings: List[Tensor] = []

  @staticmethod
2 changes: 1 addition & 1 deletion anomalib/models/dfm/config.yaml
@@ -40,9 +40,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1 # Don't validate before extracting features.
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
1 change: 0 additions & 1 deletion anomalib/models/dfm/model.py
@@ -34,7 +34,6 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
  self.model: DFMModel = DFMModel(
      backbone=hparams.model.backbone, n_comps=hparams.model.pca_level, score_type=hparams.model.score_type
  )
- self.automatic_optimization = False
  self.embeddings: List[Tensor] = []

  @staticmethod
2 changes: 1 addition & 1 deletion anomalib/models/ganomaly/config.yaml
@@ -61,9 +61,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 2
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
2 changes: 1 addition & 1 deletion anomalib/models/padim/config.yaml
@@ -54,9 +54,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1 # Don't validate before extracting features.
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
1 change: 0 additions & 1 deletion anomalib/models/padim/model.py
@@ -294,7 +294,6 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
  ).eval()

  self.stats: List[Tensor] = []
- self.automatic_optimization = False
  self.embeddings: List[Tensor] = []

  @staticmethod
2 changes: 1 addition & 1 deletion anomalib/models/patchcore/config.yaml
@@ -52,9 +52,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1 # Don't validate before extracting features.
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
1 change: 0 additions & 1 deletion anomalib/models/patchcore/model.py
@@ -275,7 +275,6 @@ def __init__(self, hparams) -> None:
      backbone=hparams.model.backbone,
      apply_tiling=hparams.dataset.tiling.apply,
  )
- self.automatic_optimization = False
  self.embeddings: List[Tensor] = []

  def configure_optimizers(self) -> None:
2 changes: 1 addition & 1 deletion anomalib/models/stfpm/config.yaml
@@ -61,9 +61,9 @@ trainer:
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1
- checkpoint_callback: true
  default_root_dir: null
  deterministic: false
+ enable_checkpointing: true
  fast_dev_run: false
  gpus: 1
  gradient_clip_val: 0
3 changes: 2 additions & 1 deletion anomalib/utils/callbacks/visualizer_callback.py
@@ -28,6 +28,7 @@
  from anomalib.pre_processing.transforms import Denormalize
  from anomalib.utils import loggers
  from anomalib.utils.loggers import AnomalibWandbLogger
+ from anomalib.utils.loggers.base import ImageLoggerBase


  class VisualizerCallback(Callback):

@@ -69,7 +70,7 @@ def _add_images(
  for log_to in module.hparams.project.log_images_to:
      if log_to in loggers.AVAILABLE_LOGGERS:
          # check if logger object is same as the requested object
-         if log_to in logger_type and module.logger is not None:
+         if log_to in logger_type and module.logger is not None and isinstance(module.logger, ImageLoggerBase):
              module.logger.add_image(
                  image=visualizer.figure,
                  name=filename.parent.name + "_" + filename.name,
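
The added `isinstance` check is what resolves the mypy issues mentioned in the commit message: `module.logger` is typed as the generic Lightning logger, which does not declare `add_image`, so the call only type-checks once the logger is narrowed to anomalib's `ImageLoggerBase`. A reduced sketch of the idiom with stand-in classes (the names below are illustrative, not anomalib's):

    from typing import Optional


    class LoggerBase:
        """Stand-in for pytorch_lightning's generic logger base class."""


    class ImageLogger(LoggerBase):
        """Stand-in for an image-capable subclass such as ImageLoggerBase."""

        def add_image(self, name: str) -> None:
            print(f"logged {name}")


    def log_figure(logger: Optional[LoggerBase], name: str) -> None:
        # isinstance() both excludes None at runtime and narrows the static
        # type, so mypy accepts the add_image() call below.
        if logger is not None and isinstance(logger, ImageLogger):
            logger.add_image(name)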
2 changes: 1 addition & 1 deletion anomalib/utils/loggers/wandb.py
@@ -86,7 +86,7 @@ def __init__(
  anonymous: Optional[bool] = None,
  version: Optional[str] = None,
  project: Optional[str] = None,
- log_model: Optional[bool] = False,
+ log_model: Union[str, bool] = False,
  experiment=None,
  prefix: Optional[str] = "",
  sync_step: Optional[bool] = None,
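
The widened annotation mirrors the upstream WandbLogger API, where `log_model` also accepts the string `"all"`: a bool uploads the final checkpoint at the end of training, while `"all"` uploads every checkpoint as it is saved. An illustrative call (the project name is made up):

    from pytorch_lightning.loggers import WandbLogger

    log_at_end = WandbLogger(project="anomalib-demo", log_model=True)      # final checkpoint only
    log_everything = WandbLogger(project="anomalib-demo", log_model="all")  # every saved checkpoint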
2 changes: 1 addition & 1 deletion requirements/base.txt
@@ -12,7 +12,7 @@ opencv-python>=4.5.3.56
  opencv-contrib-python==4.5.5.62
  pandas~=1.1.5
  pillow==9.0.0
- pytorch-lightning==1.5.9
+ pytorch-lightning>=1.6.0
  torch==1.8.1
  torchvision==0.9.1
  torchtext==0.9.1
1 change: 1 addition & 0 deletions tests/helpers/model.py
@@ -109,6 +109,7 @@ def setup_model_train(
  # Train the model.
  if fast_run:
      config.trainer.max_epochs = 1
+     config.trainer.check_val_every_n_epoch = 1

  trainer = Trainer(callbacks=callbacks, **config.trainer)
  trainer.fit(model=model, datamodule=datamodule)
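
This override backs the commit-message bullet about saving weights under `fast_run`: with `max_epochs=1` and a model config that still uses `check_val_every_n_epoch: 2` (e.g. ganomaly above), the run would end before the first validation is due, so, as the commit message notes, the weights would never be saved. An illustrative Trainer configuration with the two flags forced to compatible values:

    from pytorch_lightning import Trainer

    # max_epochs=1 together with check_val_every_n_epoch=1 ensures that
    # validation (and hence checkpointing) runs inside the single fast_run epoch.
    trainer = Trainer(max_epochs=1, check_val_every_n_epoch=1, enable_checkpointing=True)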
