Add console logger #241

Merged · 39 commits · Apr 22, 2022
Changes from 28 commits
Commits (39)
fdc6898
☑︎ Check if openvino is in `config.yaml` file.
samet-akcay Apr 14, 2022
0842a1a
🗑 Remove `update_device_config` from `get_config`
samet-akcay Apr 14, 2022
fa2e02f
➕ `devices` and 🗑 `gpus` from test configs
samet-akcay Apr 14, 2022
165ac35
➕ `devices` and 🗑 `gpus` from model configs.
samet-akcay Apr 14, 2022
2c43d6b
🗑 `terminate_on_nan` from trainer configs.
samet-akcay Apr 14, 2022
bd94354
➕ Added new Trainer config params and 🗑 deprecated ones.
samet-akcay Apr 14, 2022
6b7173a
🗑 callback
samet-akcay Apr 14, 2022
a7b58aa
🗑 device and set progress bar to true
samet-akcay Apr 14, 2022
9495ffb
🛠 Fix tests
samet-akcay Apr 14, 2022
aa250d7
➕ Added console logger to `anomalib.utils.logger`
samet-akcay Apr 19, 2022
193fed6
🏷 Rename `get_logger` to `get_experiment_logger`
samet-akcay Apr 20, 2022
ac6a3aa
➕ Added logger information to BTech dataset.
samet-akcay Apr 20, 2022
95a6080
➕ Added logger information to Folder dataset.
samet-akcay Apr 20, 2022
8d2fd7e
➕ Added logger information to Mvtec AD dataset.
samet-akcay Apr 20, 2022
5fa0758
Fix typos in data loggers.
samet-akcay Apr 20, 2022
ee8f9a5
✏️ Add logging info to lightning models.
samet-akcay Apr 20, 2022
7402631
✏️ Add logging info to callbacks.
samet-akcay Apr 20, 2022
88141ae
Added suppress warning as argument to train.py
samet-akcay Apr 20, 2022
8a82bb4
Update Padim lightning module
samet-akcay Apr 20, 2022
fc0f600
🗑️ Remove print statements from patchcore torch model.
samet-akcay Apr 20, 2022
283642c
✏️ a typo in model load checkpoint
samet-akcay Apr 20, 2022
c5e3aa0
🔁 Replace prints with logger.info
samet-akcay Apr 20, 2022
bdac8a7
Updated console
samet-akcay Apr 20, 2022
722d576
Update train
samet-akcay Apr 20, 2022
0d825d1
pull dev
samet-akcay Apr 20, 2022
676d9c1
Merge branch 'development' of github.com:openvinotoolkit/anomalib int…
samet-akcay Apr 20, 2022
c72cb93
🔧 Update logger name in mvtec
samet-akcay Apr 20, 2022
0234536
🏷 Renamed get_logger to get_experiment_logger
samet-akcay Apr 20, 2022
07abb34
🤝 Merge branch 'development' and resolve conflicts.
samet-akcay Apr 21, 2022
8a90185
➕ Added console logger to train.py
samet-akcay Apr 21, 2022
f537e55
🛠 Modify data loggers
samet-akcay Apr 21, 2022
30cd9b1
🛠 Modify lightning model loggers
samet-akcay Apr 21, 2022
959ff1d
🛠 Modify torch model loggers
samet-akcay Apr 21, 2022
497eb74
🛠 Modify callback loggers
samet-akcay Apr 21, 2022
6e08b81
🗑 Removed console.py and ➕ add get_console_logger to __init__
samet-akcay Apr 21, 2022
18df004
🛠 Modify train entrypoint
samet-akcay Apr 21, 2022
aaf9fb3
🏷 Renamed get_console_logger to configure_logger
samet-akcay Apr 22, 2022
190d2da
🔧 Made the changes to use logging.getLogger instead
samet-akcay Apr 22, 2022
1a0fb92
🤝 Resolve merge conflicts.
samet-akcay Apr 22, 2022
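
The new `anomalib.utils.loggers` console helper itself is not part of the diff excerpt below; only its call sites appear (`logger = get_console_logger(__name__)`). A minimal sketch of what such a helper might look like, assuming it simply attaches a stream handler to a named logger; the body, formatter string, and default level are illustrative guesses rather than the PR's actual implementation (which was later renamed to `configure_logger` and then switched to plain `logging.getLogger`, per commits aaf9fb3 and 190d2da):

```python
import logging
import sys


def get_console_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Hypothetical sketch: return a logger that writes formatted records to stdout."""
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid stacking duplicate handlers on repeated calls
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger
```
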
30 changes: 0 additions & 30 deletions anomalib/config/config.py
@@ -21,7 +21,6 @@
from typing import List, Optional, Union
from warnings import warn

import torch
from omegaconf import DictConfig, ListConfig, OmegaConf


@@ -112,37 +111,10 @@ def update_multi_gpu_training_config(config: Union[DictConfig, ListConfig]) -> U
return config


def update_device_config(config: Union[DictConfig, ListConfig], openvino: bool) -> Union[DictConfig, ListConfig]:
"""Update XPU Device Config This function ensures devices are configured correctly by the user.

Args:
config (Union[DictConfig, ListConfig]): Input config
openvino (bool): Boolean to check if OpenVINO Inference is enabled.

Returns:
Union[DictConfig, ListConfig]: Updated config
"""

config.openvino = openvino
if openvino:
config.trainer.gpus = 0

if not torch.cuda.is_available():
config.trainer.gpus = 0

if config.trainer.gpus == 0 and torch.cuda.is_available():
config.trainer.gpus = 1

config = update_multi_gpu_training_config(config)

return config


def get_configurable_parameters(
model_name: Optional[str] = None,
model_config_path: Optional[Union[Path, str]] = None,
weight_file: Optional[str] = None,
openvino: bool = False,
config_filename: Optional[str] = "config",
config_file_extension: Optional[str] = "yaml",
) -> Union[DictConfig, ListConfig]:
@@ -152,7 +124,6 @@ def get_configurable_parameters(
model_name: Optional[str]: (Default value = None)
model_config_path: Optional[Union[Path, str]]: (Default value = None)
weight_file: Path to the weight file
openvino: Use OpenVINO
config_filename: Optional[str]: (Default value = "config")
config_file_extension: Optional[str]: (Default value = "yaml")

@@ -191,7 +162,6 @@ def get_configurable_parameters(
config.model.weight_file = weight_file

config = update_nncf_config(config)
config = update_device_config(config, openvino)

# thresholding
if "pixel_default" not in config.model.threshold.keys():
18 changes: 9 additions & 9 deletions anomalib/data/btech.py
@@ -20,7 +20,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import shutil
import zipfile
from pathlib import Path
@@ -47,9 +46,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def make_btech_dataset(
@@ -349,23 +348,23 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
zip_filename = self.root.parent / "btad.zip"

logging.info("Downloading the BTech dataset.")
logger.info("Downloading the BTech dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
urlretrieve(
url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
filename=zip_filename,
reporthook=progress_bar.update_to,
) # nosec

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with zipfile.ZipFile(zip_filename, "r") as zip_file:
zip_file.extractall(self.root.parent)

logging.info("Renaming the dataset directory")
logger.info("Renaming the dataset directory")
shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))

# NOTE: Each BTech category has different image extension as follows
@@ -377,13 +376,13 @@ def prepare_data(self) -> None:
# To avoid any conflict, the following script converts all the extensions to png.
# This solution works fine, but it's also possible to properly read the bmp and
# png filenames from categories in `make_btech_dataset` function.
logging.info("Convert the bmp formats to png to have consistent image extensions")
logger.info("Convert the bmp formats to png to have consistent image extensions")
for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
image = cv2.imread(str(filename))
cv2.imwrite(str(filename.with_suffix(".png")), image)
filename.unlink()

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
zip_filename.unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -396,6 +395,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = BTech(
root=self.root,
6 changes: 3 additions & 3 deletions anomalib/data/folder.py
@@ -17,7 +17,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
from pathlib import Path
from typing import Dict, Optional, Tuple, Union

@@ -38,9 +37,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: Folder Dataset")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def _check_and_convert_path(path: Union[str, Path]) -> Path:
@@ -459,6 +458,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = FolderDataset(
normal_dir=self.normal_dir,
14 changes: 7 additions & 7 deletions anomalib/data/mvtec.py
@@ -38,7 +38,6 @@
# See the License for the specific language governing permissions
# and limitations under the License.

import logging
import tarfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
@@ -63,9 +62,9 @@
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
from anomalib.utils.loggers import get_console_logger

logger = logging.getLogger(name="Dataset: MVTec AD")
logger.setLevel(logging.DEBUG)
logger = get_console_logger(__name__)


def make_mvtec_dataset(
@@ -372,24 +371,24 @@ def __init__(
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
logger.info("Found the dataset.")
else:
self.root.mkdir(parents=True, exist_ok=True)
dataset_name = "mvtec_anomaly_detection.tar.xz"

logging.info("Downloading the dataset.")
logger.info("Downloading the Mvtec AD dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="MVTec AD") as progress_bar:
urlretrieve(
url=f"ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/{dataset_name}",
filename=self.root / dataset_name,
reporthook=progress_bar.update_to,
)

logging.info("Extracting the dataset.")
logger.info("Extracting the dataset.")
with tarfile.open(self.root / dataset_name) as tar_file:
tar_file.extractall(self.root)

logging.info("Cleaning the tar file")
logger.info("Cleaning the tar file")
(self.root / dataset_name).unlink()

def setup(self, stage: Optional[str] = None) -> None:
@@ -399,6 +398,7 @@ def setup(self, stage: Optional[str] = None) -> None:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)

"""
logger.info("Setting up train, validation, test and prediction datasets.")
if stage in (None, "fit"):
self.train_data = MVTec(
root=self.root,
2 changes: 1 addition & 1 deletion anomalib/models/__init__.py
@@ -54,7 +54,7 @@ def get_model(config: Union[DictConfig, ListConfig]) -> AnomalyModule:
torch_model_list: List[str] = ["padim", "stfpm", "dfkde", "dfm", "patchcore", "cflow", "ganomaly"]
model: AnomalyModule

if config.openvino:
if "openvino" in config.keys() and config.openvino:
if config.model.name in openvino_model_list:
module = import_module(f"anomalib.models.{config.model.name}.model")
model = getattr(module, f"{config.model.name.capitalize()}OpenVINO")
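
The guard above makes `get_model` tolerant of configs that predate the `openvino` key. A small, self-contained illustration of the same defensive lookup with OmegaConf; the config contents are made up for the example, and `config.get(...)` is shown only as an equivalent alternative, not as what the PR uses:

```python
from omegaconf import OmegaConf

config = OmegaConf.create({"model": {"name": "padim"}})  # no "openvino" key present

# Membership check mirrors the diff above: it short-circuits before touching the
# missing attribute, so no error is raised for older configs.
use_openvino = "openvino" in config.keys() and config.openvino
print(use_openvino)  # False

# Equivalent alternative: supply a default instead of testing membership.
print(config.get("openvino", False))  # False
```
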
13 changes: 4 additions & 9 deletions anomalib/models/cflow/config.yaml
@@ -54,7 +54,7 @@ project:

# PL Trainer Args. Don't add extra parameter here.
trainer:
accelerator: null
accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
accumulate_grad_batches: 1
amp_backend: native
auto_lr_find: false
@@ -63,10 +63,12 @@ trainer:
benchmark: false
check_val_every_n_epoch: 1
default_root_dir: null
detect_anomaly: false
deterministic: false
enable_checkpointing: true
enable_progress_bar: true
fast_dev_run: false
gpus: 1
gpus: null # Set automatically
gradient_clip_val: 0
limit_predict_batches: 1.0
limit_test_batches: 1.0
Expand All @@ -86,16 +88,9 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32
prepare_data_per_node: true
process_position: 0
profiler: null
progress_bar_refresh_rate: null
replace_sampler_ddp: true
stochastic_weight_avg: false
sync_batchnorm: false
terminate_on_nan: false
tpu_cores: null
track_grad_norm: -1
val_check_interval: 1.0
weights_save_path: null
weights_summary: top
4 changes: 4 additions & 0 deletions anomalib/models/cflow/lightning_model.py
@@ -26,6 +26,9 @@
from anomalib.models.cflow.torch_model import CflowModel
from anomalib.models.cflow.utils import get_logp, positional_encoding_2d
from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)

__all__ = ["CflowLightning"]

@@ -35,6 +38,7 @@ class CflowLightning(AnomalyModule):

def __init__(self, hparams):
super().__init__(hparams)
logger.info("Initializing Cflow Lightning model.")

self.model: CflowModel = CflowModel(hparams)
self.loss_val = 0
5 changes: 4 additions & 1 deletion anomalib/models/cflow/utils.py
@@ -22,6 +22,9 @@

from anomalib.models.components.freia.framework import SequenceINN
from anomalib.models.components.freia.modules import AllInOneBlock
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)


def get_logp(dim_feature_vector: int, p_u: torch.Tensor, logdet_j: torch.Tensor) -> torch.Tensor:
@@ -108,7 +111,7 @@ def cflow_head(
SequenceINN: decoder network block
"""
coder = SequenceINN(n_features)
print("CNF coder:", n_features)
logger.info("CNF coder: %d", n_features)
for _ in range(coupling_blocks):
coder.append(
AllInOneBlock,
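
The hunk above also switches a bare `print` to `logger.info` with %-style arguments. A short standard-library illustration (not anomalib-specific) of why the arguments are passed lazily rather than pre-formatted:

```python
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)  # INFO records will be discarded

n_features = 512

# Lazy %-style: the message is only rendered if the INFO level is enabled,
# so this call is nearly free when the logger is set to WARNING.
logger.info("CNF coder: %d", n_features)

# An f-string is always evaluated up front, even though the record is dropped.
logger.info(f"CNF coder: {n_features}")
```
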
13 changes: 4 additions & 9 deletions anomalib/models/dfkde/config.yaml
@@ -38,7 +38,7 @@ project:

# PL Trainer Args. Don't add extra parameter here.
trainer:
accelerator: null
accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
accumulate_grad_batches: 1
amp_backend: native
auto_lr_find: false
@@ -47,10 +47,12 @@ trainer:
benchmark: false
check_val_every_n_epoch: 1 # Don't validate before extracting features.
default_root_dir: null
detect_anomaly: false
deterministic: false
enable_checkpointing: true
enable_progress_bar: true
fast_dev_run: false
gpus: 1
gpus: null # Set automatically
gradient_clip_val: 0
limit_predict_batches: 1.0
limit_test_batches: 1.0
Expand All @@ -70,16 +72,9 @@ trainer:
overfit_batches: 0.0
plugins: null
precision: 32
prepare_data_per_node: true
process_position: 0
profiler: null
progress_bar_refresh_rate: null
replace_sampler_ddp: true
stochastic_weight_avg: false
sync_batchnorm: false
terminate_on_nan: false
tpu_cores: null
track_grad_norm: -1
val_check_interval: 1.0 # Don't validate before extracting features.
weights_save_path: null
weights_summary: top
5 changes: 5 additions & 0 deletions anomalib/models/dfkde/lightning_model.py
@@ -21,9 +21,12 @@
from torch import Tensor

from anomalib.models.components import AnomalyModule
from anomalib.utils.loggers import get_console_logger

from .torch_model import DfkdeModel

logger = get_console_logger(__name__)


class DfkdeLightning(AnomalyModule):
"""DFKDE: Deep Feature Kernel Density Estimation.
@@ -34,6 +37,7 @@ class DfkdeLightning(AnomalyModule):

def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing DFKDE Lightning model.")
threshold_steepness = 0.05
threshold_offset = 12

@@ -75,6 +79,7 @@ def on_validation_start(self) -> None:
# NOTE: Previous anomalib versions fit Gaussian at the end of the epoch.
# This is not possible anymore with PyTorch Lightning v1.4.0 since validation
# is run within train epoch.
logger.info("Fitting a KDE model to the embedding collected from the training set.")
self.model.fit(self.embeddings)

def validation_step(self, batch, _): # pylint: disable=arguments-differ
5 changes: 4 additions & 1 deletion anomalib/models/dfkde/torch_model.py
@@ -22,6 +22,9 @@
from torch import Tensor, nn

from anomalib.models.components import PCA, FeatureExtractor, GaussianKDE
from anomalib.utils.loggers import get_console_logger

logger = get_console_logger(__name__)


class DfkdeModel(nn.Module):
@@ -88,7 +91,7 @@ def fit(self, embeddings: List[Tensor]) -> bool:
_embeddings = torch.vstack(embeddings)

if _embeddings.shape[0] < self.n_components:
print("Not enough features to commit. Not making a model.")
logger.info("Not enough features to commit. Not making a model.")
return False

# if max training points is non-zero and smaller than number of staged features, select random subset
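
The `train.py` entrypoint changes (commits 8a90185, aaf9fb3, 18df004) are not shown in this excerpt. A hedged sketch of how an entrypoint might configure logging once so that the module-level loggers added throughout this diff emit to the console; the `--log-level` flag and the `configure_logger` signature below are assumptions for illustration, not the PR's actual code:

```python
import argparse
import logging


def configure_logger(level: str = "INFO") -> None:
    """Hypothetical sketch: set up the root logger once; module loggers created
    with logging.getLogger(__name__) propagate their records to it."""
    logging.basicConfig(
        level=getattr(logging, level.upper(), logging.INFO),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Minimal logging wiring example")
    parser.add_argument("--log-level", type=str, default="INFO")
    args = parser.parse_args()
    configure_logger(args.log_level)
    logging.getLogger(__name__).info("Console logging configured.")
```
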