Add console logger (#241)
* ☑︎ Check if openvino is in the `config.yaml` file.

* 🗑 Remove `update_device_config` from `get_config`

* ➕ `devices` and 🗑 `gpus` from test configs

* ➕ `devices` and 🗑 `gpus` from model configs.

* 🗑 `terminate_on_nan` from trainer configs.

* ➕ Added new Trainer config params and 🗑 deprecated ones.

* 🗑 callback

* 🗑 device and set progress bar to true

* 🛠  Fix tests

* ➕ Added console logger to `anomalib.utils.logger`

* 🏷  Rename `get_logger` to `get_experiment_logger`

* ➕ Added logger information to BTech dataset.

* ➕ Added logger information to Folder dataset.

* ➕ Added logger information to MVTec AD dataset.

* Fix typos in data loggers.

* ✏️ Add logging info to lightning models.

* ✏️ Add logging info to callbacks.

* Added suppress warnings as an argument to train.py

* Update Padim lightning module

* 🗑️ Remove print statements from patchcore torch model.

* ✏️ Fix a typo in model load checkpoint

* 🔁 Replace prints with logger.info

* Updated console

* Update train

* 🔧 Update logger name in mvtec

* 🏷 Renamed get_logger to get_experiment_logger

* ➕ Added console logger to train.py

* 🛠  Modify data loggers

* 🛠  Modify lightning model loggers

* 🛠  Modify torch model loggers

* 🛠  Modify callback loggers

* 🗑  Removed console.py and ➕  added get_console_logger to __init__

* 🛠  Modify train entrypoint

* 🏷  Renamed get_console_logger to configure_logger

* 🔧 Made the changes to use logging.getLogger instead
samet-akcay committed Apr 22, 2022
1 parent 26a9d49 commit ce279f9
Showing 25 changed files with 176 additions and 54 deletions.
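
The change repeated across the files below follows one pattern: every module declares a plain module-level logger via logging.getLogger(__name__), and console output is configured once at the entrypoint through the helper that ends up named configure_logger. The helper's body is not rendered in this diff, so the following is only a minimal sketch of that pattern; the handler setup, format string, and default level are assumptions rather than the committed implementation.

import logging


def configure_logger(level: int = logging.INFO) -> None:
    """Hypothetical sketch: attach a single console handler to the root logger."""
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(console)


# Each changed module then needs only a module-level logger: no handlers and no
# setLevel calls inside library code.
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    configure_logger()
    logger.info("Messages propagate up to the root logger's console handler.")
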
16 changes: 8 additions & 8 deletions anomalib/data/btech.py
@@ -48,8 +48,7 @@
)
from anomalib.pre_processing import PreProcessor

logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)


def make_btech_dataset(
@@ -349,23 +348,23 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
zip_filename = self.root.parent / "btad.zip"

logging.info("Downloading the BTech dataset.")
logger.info("Downloading the BTech dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
urlretrieve(
url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
filename=zip_filename,
reporthook=progress_bar.update_to,
) # nosec

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with zipfile.ZipFile(zip_filename, "r") as zip_file:
zip_file.extractall(self.root.parent)

logging.info("Renaming the dataset directory")
logger.info("Renaming the dataset directory")
shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))

# NOTE: Each BTech category has different image extension as follows
@@ -377,13 +376,13 @@ def prepare_data(self) -> None:
# To avoid any conflict, the following script converts all the extensions to png.
# This solution works fine, but it's also possible to properly ready the bmp and
# png filenames from categories in `make_btech_dataset` function.
logging.info("Convert the bmp formats to png to have consistent image extensions")
logger.info("Convert the bmp formats to png to have consistent image extensions")
for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
image = cv2.imread(str(filename))
cv2.imwrite(str(filename.with_suffix(".png")), image)
filename.unlink()

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
zip_filename.unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -396,6 +395,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)
"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = BTech(
root=self.root,
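
The diff above replaces the dataset module's custom-named logger (pinned to DEBUG) with the conventional logging.getLogger(__name__). Inside anomalib/data/btech.py that name resolves to the hierarchical "anomalib.data.btech", so an application can tune whole subtrees of loggers without editing library code; a small illustration:

import logging

# Old style removed by this commit: an ad-hoc display name with the level forced in library code.
old_logger = logging.getLogger("Dataset: BTech")
old_logger.setLevel(logging.DEBUG)

# New style: the module path becomes the logger name, so one call can adjust every data module.
new_logger = logging.getLogger("anomalib.data.btech")  # what __name__ resolves to in that module
logging.getLogger("anomalib.data").setLevel(logging.WARNING)  # quiets all anomalib.data.* loggers at once
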
4 changes: 2 additions & 2 deletions anomalib/data/folder.py
@@ -39,8 +39,7 @@
)
from anomalib.pre_processing import PreProcessor

logger = logging.getLogger(name="Dataset: Folder Dataset")
logger.setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)


def _check_and_convert_path(path: Union[str, Path]) -> Path:
@@ -459,6 +458,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)
"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = FolderDataset(
normal_dir=self.normal_dir,
12 changes: 6 additions & 6 deletions anomalib/data/mvtec.py
@@ -64,8 +64,7 @@
)
from anomalib.pre_processing import PreProcessor

logger = logging.getLogger(name="Dataset: MVTec AD")
logger.setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)


def make_mvtec_dataset(
@@ -372,24 +371,24 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
self.root.mkdir(parents=True, exist_ok=True)
dataset_name = "mvtec_anomaly_detection.tar.xz"

logging.info("Downloading the dataset.")
logger.info("Downloading the Mvtec AD dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="MVTec AD") as progress_bar:
urlretrieve(
url=f"ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/{dataset_name}",
filename=self.root / dataset_name,
reporthook=progress_bar.update_to,
)

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with tarfile.open(self.root / dataset_name) as tar_file:
tar_file.extractall(self.root)

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
(self.root / dataset_name).unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -399,6 +398,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)
"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = MVTec(
root=self.root,
5 changes: 5 additions & 0 deletions anomalib/models/cflow/lightning_model.py
@@ -17,6 +17,8 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging

import einops
import torch
import torch.nn.functional as F
@@ -27,6 +29,8 @@
from anomalib.models.cflow.utils import get_logp, positional_encoding_2d
from anomalib.models.components import AnomalyModule

logger = logging.getLogger(__name__)

__all__ = ["CflowLightning"]


@@ -35,6 +39,7 @@ class CflowLightning(AnomalyModule):

def __init__(self, hparams):
super().__init__(hparams)
logger.info("Initializing Cflow Lightning model.")

self.model: CflowModel = CflowModel(hparams)
self.loss_val = 0
5 changes: 4 additions & 1 deletion anomalib/models/cflow/utils.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import math

import numpy as np
@@ -23,6 +24,8 @@
from anomalib.models.components.freia.framework import SequenceINN
from anomalib.models.components.freia.modules import AllInOneBlock

logger = logging.getLogger(__name__)


def get_logp(dim_feature_vector: int, p_u: torch.Tensor, logdet_j: torch.Tensor) -> torch.Tensor:
"""Returns the log likelihood estimation.
@@ -108,7 +111,7 @@ def cflow_head(
SequenceINN: decoder network block
"""
coder = SequenceINN(n_features)
print("CNF coder:", n_features)
logger.info("CNF coder: %d", n_features)
for _ in range(coupling_blocks):
coder.append(
AllInOneBlock,
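
The print in cflow_head becomes a logger.info call using %-style placeholders, which logging interpolates lazily, only when the record is actually emitted. A brief illustration of the difference (not part of the commit):

import logging

logger = logging.getLogger(__name__)
n_features = 512

logger.info(f"CNF coder: {n_features}")   # f-string is formatted eagerly, even if INFO is disabled
logger.info("CNF coder: %d", n_features)  # arguments are interpolated only if the message is emitted
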
5 changes: 5 additions & 0 deletions anomalib/models/dfkde/lightning_model.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from typing import List, Union

from omegaconf.dictconfig import DictConfig
@@ -24,6 +25,8 @@

from .torch_model import DfkdeModel

logger = logging.getLogger(__name__)


class DfkdeLightning(AnomalyModule):
"""DFKDE: Deep Feature Kernel Density Estimation.
@@ -34,6 +37,7 @@ class DfkdeLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing DFKDE Lightning model.")
threshold_steepness = 0.05
threshold_offset = 12

@@ -75,6 +79,7 @@ def on_validation_start(self) -> None:
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Fitting a KDE model to the embedding collected from the training set.")
self.model.fit(self.embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
5 changes: 4 additions & 1 deletion anomalib/models/dfkde/torch_model.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import random
from typing import List, Optional, Tuple

@@ -23,6 +24,8 @@

from anomalib.models.components import PCA, FeatureExtractor, GaussianKDE

logger = logging.getLogger(__name__)


class DfkdeModel(nn.Module):
"""Normality Model for the DFKDE algorithm.
@@ -88,7 +91,7 @@ def fit(self, embeddings: List[Tensor]) -> bool:
_embeddings = torch.vstack(embeddings)

if _embeddings.shape[0] < self.n_components:
print("Not enough features to commit. Not making a model.")
logger.info("Not enough features to commit. Not making a model.")
return False

# if max training points is non-zero and smaller than number of staged features, select random subset
1 change: 0 additions & 1 deletion anomalib/models/dfm/config.yaml
@@ -79,7 +79,6 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32

profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true
9 changes: 8 additions & 1 deletion anomalib/models/dfm/lightning_model.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from typing import List, Union

import torch
@@ -24,12 +25,15 @@

from .torch_model import DFMModel

logger = logging.getLogger(__name__)


class DfmLightning(AnomalyModule):
"""DFM: Deep Featured Kernel Density Estimation."""

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing DFKDE Lightning model.")

self.model: DFMModel = DFMModel(
backbone=hparams.model.backbone,
@@ -66,11 +70,14 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
self.embeddings.append(embedding)

def on_validation_start(self) -> None:
"""Fit a KDE Model to the embedding collected from the training set."""
"""Fit a PCA transformation and a Gaussian model to dataset."""
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Aggregating the embedding extracted from the training set.")
embeddings = torch.vstack(self.embeddings)

logger.info("Fitting a PCA and a Gaussian model to dataset.")
self.model.fit(embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
1 change: 0 additions & 1 deletion anomalib/models/ganomaly/config.yaml
@@ -97,7 +97,6 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32

profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true
6 changes: 6 additions & 0 deletions anomalib/models/ganomaly/lightning_model.py
@@ -17,6 +17,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from typing import Dict, List, Union

import torch
@@ -29,6 +30,8 @@

from .torch_model import GanomalyModel

logger = logging.getLogger(__name__)


class GanomalyLightning(AnomalyModule):
"""PL Lightning Module for the GANomaly Algorithm.
@@ -39,6 +42,7 @@ class GanomalyLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing Ganomaly Lightning model.")

self.model: GanomalyModel = GanomalyModel(
input_size=hparams.model.input_size,
@@ -140,6 +144,7 @@ def validation_step(self, batch, _) -> Dict[str, Tensor]: # type: ignore # pyli

def validation_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
logger.info("Normalizing validation outputs based on min/max values.")
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().validation_epoch_end(outputs)
@@ -159,6 +164,7 @@ def test_step(self, batch, _):

def test_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
logger.info("Normalizing test outputs based on min/max values.")
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().test_epoch_end(outputs)
1 change: 0 additions & 1 deletion anomalib/models/padim/config.yaml
@@ -92,7 +92,6 @@
profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true

sync_batchnorm: false
tpu_cores: null
track_grad_norm: -1
8 changes: 8 additions & 0 deletions anomalib/models/padim/lightning_model.py
@@ -17,6 +17,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from typing import List, Union

import torch
@@ -26,6 +27,8 @@
from anomalib.models.components import AnomalyModule
from anomalib.models.padim.torch_model import PadimModel

logger = logging.getLogger(__name__)

__all__ = ["PadimLightning"]


@@ -38,6 +41,8 @@ class PadimLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing Padim Lightning model.")

self.layers = hparams.model.layers
self.model: PadimModel = PadimModel(
layers=hparams.model.layers,
@@ -80,7 +85,10 @@ def on_validation_start(self) -> None:
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Aggregating the embedding extracted from the training set.")
embeddings = torch.vstack(self.embeddings)

logger.info("Fitting a Gaussian to the embedding collected from the training set.")
self.stats = self.model.gaussian.fit(embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
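
The commit message also adds the console logger to the train.py entrypoint, but that file's diff is not rendered on this page. A plausible wiring is sketched below; the import path, flag name, and configure_logger signature are assumptions, not taken from the diff.

import logging
from argparse import ArgumentParser

from anomalib.utils.loggers import configure_logger  # import path assumed


def get_args():
    parser = ArgumentParser()
    parser.add_argument("--log-level", type=str, default="INFO", help="Console verbosity (flag name assumed)")
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    # Configure the console handler once; the module-level loggers added throughout anomalib inherit it.
    configure_logger(level=getattr(logging, args.log_level.upper()))
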
Loading
