Add console logger #241

Merged Apr 22, 2022 (39 commits)

Commits (the changes shown below are from 30 of the 39 commits)
fdc6898
☑︎ Check if openvino is in `config.yaml` file.
samet-akcay Apr 14, 2022
0842a1a
🗑 Remove `update_device_config` from `get_config`
samet-akcay Apr 14, 2022
fa2e02f
➕ `devices` and 🗑 `gpus` from test configs
samet-akcay Apr 14, 2022
165ac35
➕ `devices` and 🗑 `gpus` from model configs.
samet-akcay Apr 14, 2022
2c43d6b
🗑 `terminate_on_nan` from trainer configs.
samet-akcay Apr 14, 2022
bd94354
➕ Added new Trainer config params and 🗑 deprecated ones.
samet-akcay Apr 14, 2022
6b7173a
🗑 callback
samet-akcay Apr 14, 2022
a7b58aa
🗑 device and set progress bar to true
samet-akcay Apr 14, 2022
9495ffb
🛠 Fix tests
samet-akcay Apr 14, 2022
aa250d7
➕ Added console logger to `anomalib.utils.logger`
samet-akcay Apr 19, 2022
193fed6
🏷 Rename `get_logger` to `get_experiment_logger`
samet-akcay Apr 20, 2022
ac6a3aa
➕ Added logger information to BTech dataset.
samet-akcay Apr 20, 2022
95a6080
➕ Added logger information to Folder dataset.
samet-akcay Apr 20, 2022
8d2fd7e
➕ Added logger information to Mvtec AD dataset.
samet-akcay Apr 20, 2022
5fa0758
Fix typos in data loggers.
samet-akcay Apr 20, 2022
ee8f9a5
✏️ Add logging info to lightning models.
samet-akcay Apr 20, 2022
7402631
✏️ Add logging info to callbacks.
samet-akcay Apr 20, 2022
88141ae
Added suppress warning as argument to train.py
samet-akcay Apr 20, 2022
8a82bb4
Update Padim lightning module
samet-akcay Apr 20, 2022
fc0f600
🗑️ Remove print statements from patchcore torch model.
samet-akcay Apr 20, 2022
283642c
✏️ a typo in model load checkpoint
samet-akcay Apr 20, 2022
c5e3aa0
🔁 Replace prints with logger.info
samet-akcay Apr 20, 2022
bdac8a7
Updated console
samet-akcay Apr 20, 2022
722d576
Update train
samet-akcay Apr 20, 2022
0d825d1
pull dev
samet-akcay Apr 20, 2022
676d9c1
Merge branch 'development' of github.com:openvinotoolkit/anomalib int…
samet-akcay Apr 20, 2022
c72cb93
🔧 Update logger name in mvtec
samet-akcay Apr 20, 2022
0234536
🏷 Renamed get_logger to get_experiment_logger
samet-akcay Apr 20, 2022
07abb34
🤝 Merge branch 'development' and resolve conflicts.
samet-akcay Apr 21, 2022
8a90185
➕ Added console logger to train.py
samet-akcay Apr 21, 2022
f537e55
🛠 Modify data loggers
samet-akcay Apr 21, 2022
30cd9b1
🛠 Modify lightning model loggers
samet-akcay Apr 21, 2022
959ff1d
🛠 Modify torch model loggers
samet-akcay Apr 21, 2022
497eb74
🛠 Modify callback loggers
samet-akcay Apr 21, 2022
6e08b81
🗑 Removed console.py and ➕ add get_console_logger to __init__
samet-akcay Apr 21, 2022
18df004
🛠 Modify train entrypoint
samet-akcay Apr 21, 2022
aaf9fb3
🏷 Renamed get_console_logger to configure_logger
samet-akcay Apr 22, 2022
190d2da
🔧 Made the changes to use logging.getLogger instead
samet-akcay Apr 22, 2022
1a0fb92
🤝 Resolve merge conflicts.
samet-akcay Apr 22, 2022
18 changes: 9 additions & 9 deletions anomalib/data/btech.py
@@ -20,7 +20,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import shutil
import zipfile
from pathlib import Path
@@ -47,9 +46,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def make_btech_dataset(
@@ -349,23 +348,23 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
zip_filename = self.root.parent / "btad.zip"

logging.info("Downloading the BTech dataset.")
logger.info("Downloading the BTech dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
urlretrieve(
url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
filename=zip_filename,
reporthook=progress_bar.update_to,
) # nosec

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with zipfile.ZipFile(zip_filename, "r") as zip_file:
zip_file.extractall(self.root.parent)

logging.info("Renaming the dataset directory")
logger.info("Renaming the dataset directory")
shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))

# NOTE: Each BTech category has different image extension as follows
@@ -377,13 +376,13 @@ def prepare_data(self) -> None:
# To avoid any conflict, the following script converts all the extensions to png.
# This solution works fine, but it's also possible to properly read the bmp and
# png filenames from categories in `make_btech_dataset` function.
logging.info("Convert the bmp formats to png to have consistent image extensions")
logger.info("Convert the bmp formats to png to have consistent image extensions")
for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
image = cv2.imread(str(filename))
cv2.imwrite(str(filename.with_suffix(".png")), image)
filename.unlink()

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
zip_filename.unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -396,6 +395,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = BTech(
root=self.root,
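The recurring change in this file replaces module-level `logging.info(...)` calls, which go through the anonymous root logger, with a module-scoped named logger. A named logger carries the module path in every record and can be filtered or reconfigured per subtree; a standalone illustration (not from the PR):

```python
import logging

logging.basicConfig(format="%(name)s: %(message)s", level=logging.INFO)

logging.info("via the root logger")  # record name shows as "root"

logger = logging.getLogger("anomalib.data.btech")
logger.info("via a named logger")  # record name is the module path

# Levels can now be tuned per subtree without touching other modules.
logging.getLogger("anomalib.data").setLevel(logging.WARNING)
logger.info("suppressed")  # effective level inherited from "anomalib.data"
```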
6 changes: 3 additions & 3 deletions anomalib/data/folder.py
@@ -17,7 +17,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from pathlib import Path
from typing import Dict, Optional, Tuple, Union

@@ -38,9 +37,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: Folder Dataset")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def _check_and_convert_path(path: Union[str, Path]) -> Path:
@@ -459,6 +458,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = FolderDataset(
normal_dir=self.normal_dir,
14 changes: 7 additions & 7 deletions anomalib/data/mvtec.py
@@ -38,7 +38,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import tarfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
@@ -63,9 +62,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: MVTec AD")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def make_mvtec_dataset(
@@ -372,24 +371,24 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
self.root.mkdir(parents=True, exist_ok=True)
dataset_name = "mvtec_anomaly_detection.tar.xz"

logging.info("Downloading the dataset.")
logger.info("Downloading the Mvtec AD dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="MVTec AD") as progress_bar:
urlretrieve(
url=f"ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/{dataset_name}",
filename=self.root / dataset_name,
reporthook=progress_bar.update_to,
)

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with tarfile.open(self.root / dataset_name) as tar_file:
tar_file.extractall(self.root)

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
(self.root / dataset_name).unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -399,6 +398,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = MVTec(
root=self.root,
8 changes: 5 additions & 3 deletions anomalib/deploy/inferencers/openvino.py
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import importlib
from importlib.util import find_spec
from pathlib import Path
from typing import Dict, Optional, Tuple, Union

@@ -26,8 +26,10 @@

from .base import Inferencer

if importlib.util.find_spec("openvino") is not None:
from openvino.inference_engine import IECore # pylint: disable=no-name-in-module
if find_spec("openvino") is not None:
from openvino.inference_engine import ( # type: ignore # pylint: disable=no-name-in-module
IECore,
)


class OpenVINOInferencer(Inferencer):
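The hunk above guards the OpenVINO import with `importlib.util.find_spec`, so the module imports cleanly even when the optional `openvino` package is not installed, and `IECore` is only defined when it is. A hedged sketch of how a caller might fail gracefully instead of hitting a `NameError` (the helper and its message are illustrative, not from this file):

```python
from importlib.util import find_spec

if find_spec("openvino") is not None:
    from openvino.inference_engine import IECore  # pylint: disable=no-name-in-module


def openvino_devices() -> list:
    """Sketch: query available devices, raising a clear error when openvino is absent."""
    if find_spec("openvino") is None:
        raise ImportError("openvino is required for the OpenVINOInferencer")
    return IECore().available_devices
```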
4 changes: 4 additions & 0 deletions anomalib/models/cflow/lightning_model.py
@@ -26,6 +26,9 @@
from anomalib.models.cflow.torch_model import CflowModel
from anomalib.models.cflow.utils import get_logp, positional_encoding_2d
from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)

__all__ = ["CflowLightning"]

@@ -35,6 +38,7 @@ class CflowLightning(AnomalyModule):

def __init__(self, hparams):
super().__init__(hparams)
logger.info("Initializing Cflow Lightning model.")

self.model: CflowModel = CflowModel(hparams)
self.loss_val = 0
5 changes: 4 additions & 1 deletion anomalib/models/cflow/utils.py
@@ -22,6 +22,9 @@

from anomalib.models.components.freia.framework import SequenceINN
from anomalib.models.components.freia.modules import AllInOneBlock
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)


def get_logp(dim_feature_vector: int, p_u: torch.Tensor, logdet_j: torch.Tensor) -> torch.Tensor:
@@ -108,7 +111,7 @@ def cflow_head(
SequenceINN: decoder network block
"""
coder = SequenceINN(n_features)
print("CNF coder:", n_features)
logger.info("CNF coder: %d", n_features)
for _ in range(coupling_blocks):
coder.append(
AllInOneBlock,
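Note the replacement passes `n_features` as a logging argument (`%d`) instead of interpolating it into the string. With %-style arguments the message is only formatted when the record is actually emitted, and the template stays constant for filtering; a quick comparison:

```python
import logging

logger = logging.getLogger(__name__)
n_features = 128

logger.info("CNF coder: %d", n_features)  # formatted lazily, only if INFO is emitted
logger.info(f"CNF coder: {n_features}")  # f-string is built even when INFO is disabled
```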
5 changes: 5 additions & 0 deletions anomalib/models/dfkde/lightning_model.py
@@ -21,9 +21,12 @@
from torch import Tensor

from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

from .torch_model import DfkdeModel

logger = get_console_logger(__name__)


class DfkdeLightning(AnomalyModule):
"""DFKDE: Deep Feature Kernel Density Estimation.
@@ -34,6 +37,7 @@ class DfkdeLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing DFKDE Lightning model.")
threshold_steepness = 0.05
threshold_offset = 12

@@ -75,6 +79,7 @@ def on_validation_start(self) -> None:
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Fitting a KDE model to the embedding collected from the training set.")
self.model.fit(self.embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
5 changes: 4 additions & 1 deletion anomalib/models/dfkde/torch_model.py
@@ -22,6 +22,9 @@
from torch import Tensor, nn

from anomalib.models.components import PCA, FeatureExtractor, GaussianKDE
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)


class DfkdeModel(nn.Module):
@@ -88,7 +91,7 @@ def fit(self, embeddings: List[Tensor]) -> bool:
_embeddings = torch.vstack(embeddings)

if _embeddings.shape[0] < self.n_components:
print("Not enough features to commit. Not making a model.")
logger.info("Not enough features to commit. Not making a model.")
return False

# if max training points is non-zero and smaller than number of staged features, select random subset
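The guard logged above exists because `fit` reduces the stacked embeddings with a PCA of `n_components` components, and a PCA cannot be fitted with fewer samples than components. A small sketch of the check in isolation (names are illustrative):

```python
from typing import List

import torch


def enough_samples_for_pca(embeddings: List[torch.Tensor], n_components: int = 16) -> bool:
    """Sketch: fitting a PCA with n_components needs at least that many samples."""
    stacked = torch.vstack(embeddings)  # shape: (n_samples, n_features)
    return stacked.shape[0] >= n_components
```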
1 change: 0 additions & 1 deletion anomalib/models/dfm/config.yaml
@@ -79,7 +79,6 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32

profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true
9 changes: 8 additions & 1 deletion anomalib/models/dfm/lightning_model.py
@@ -21,15 +21,19 @@
from torch import Tensor

from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

from .torch_model import DFMModel

logger = get_console_logger(__name__)


class DfmLightning(AnomalyModule):
"""DFM: Deep Featured Kernel Density Estimation."""

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing DFKDE Lightning model.")

self.model: DFMModel = DFMModel(
backbone=hparams.model.backbone,
@@ -66,11 +70,14 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
self.embeddings.append(embedding)

def on_validation_start(self) -> None:
"""Fit a KDE Model to the embedding collected from the training set."""
"""Fit a PCA transformation and a Gaussian model to dataset."""
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Aggregating the embedding extracted from the training set.")
embeddings = torch.vstack(self.embeddings)

logger.info("Fitting a PCA and a Gaussian model to dataset.")
self.model.fit(embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
1 change: 0 additions & 1 deletion anomalib/models/ganomaly/config.yaml
@@ -97,7 +97,6 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32

profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true
6 changes: 6 additions & 0 deletions anomalib/models/ganomaly/lightning_model.py
@@ -26,9 +26,12 @@

from anomalib.data.utils.image import pad_nextpow2
from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

from .torch_model import GanomalyModel

logger = get_console_logger(__name__)


class GanomalyLightning(AnomalyModule):
"""PL Lightning Module for the GANomaly Algorithm.
@@ -39,6 +42,7 @@ class GanomalyLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing Ganomaly Lightning model.")

self.model: GanomalyModel = GanomalyModel(
input_size=hparams.model.input_size,
@@ -140,6 +144,7 @@ def validation_step(self, batch, _) -> Dict[str, Tensor]: # type: ignore # pyli

def validation_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
logger.info("Normalizing validation outputs based on min/max values.")
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().validation_epoch_end(outputs)
@@ -159,6 +164,7 @@ def test_step(self, batch, _):

def test_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
logger.info("Normalizing test outputs based on min/max values.")
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().test_epoch_end(outputs)
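Both epoch-end hooks above log and then rescale the prediction scores through `self._normalize`, whose definition is outside these hunks. A hedged sketch of the usual min/max rescaling (the function and argument names are assumptions, not from this file):

```python
import torch


def normalize_scores(scores: torch.Tensor, min_score: float, max_score: float) -> torch.Tensor:
    """Sketch: map raw anomaly scores into [0, 1] using observed extrema."""
    return (scores - min_score) / (max_score - min_score)
```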
1 change: 0 additions & 1 deletion anomalib/models/padim/config.yaml
@@ -92,7 +92,6 @@ trainer:
profiler: null
reload_dataloaders_every_n_epochs: 0
replace_sampler_ddp: true

sync_batchnorm: false
tpu_cores: null
track_grad_norm: -1
8 changes: 8 additions & 0 deletions anomalib/models/padim/lightning_model.py
@@ -25,6 +25,9 @@

from anomalib.models.components import AnomalyModule
from anomalib.models.padim.torch_model import PadimModel
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)

__all__ = ["PadimLightning"]

@@ -38,6 +41,8 @@ class PadimLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing Padim Lightning model.")

self.layers = hparams.model.layers
self.model: PadimModel = PadimModel(
layers=hparams.model.layers,
@@ -80,7 +85,10 @@ def on_validation_start(self) -> None:
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Aggregating the embedding extracted from the training set.")
embeddings = torch.vstack(self.embeddings)

logger.info("Fitting a Gaussian to the embedding collected from the training set.")
self.stats = self.model.gaussian.fit(embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
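For context on the PaDiM hunk above: `on_validation_start` stacks the training embeddings and fits a Gaussian to them via `self.model.gaussian.fit`, which is not shown in this diff. A simplified sketch of such a fit, assuming a single mean and covariance over all embeddings (PaDiM proper fits one Gaussian per patch position, with a small regularization term):

```python
import torch


def fit_gaussian(embeddings: torch.Tensor, eps: float = 0.01):
    """Sketch: fit a multivariate Gaussian (mean, regularized covariance)."""
    mean = embeddings.mean(dim=0)
    centered = embeddings - mean
    cov = centered.T @ centered / (embeddings.shape[0] - 1)
    cov += eps * torch.eye(embeddings.shape[1])  # keep the covariance invertible
    return mean, cov
```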