Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add pre_train as a configurable parameter #431

Merged
merged 3 commits into from
Jul 12, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions anomalib/models/cflow/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ dataset:
model:
name: cflow
backbone: wide_resnet50_2
pre_trained: true
layers:
- layer2
- layer3
Expand Down
2 changes: 2 additions & 0 deletions anomalib/models/cflow/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ def __init__(
input_size: Tuple[int, int],
backbone: str,
layers: List[str],
pre_trained: bool = True,
fiber_batch_size: int = 64,
decoder: str = "freia-cflow",
condition_vector: int = 128,
Expand All @@ -56,6 +57,7 @@ def __init__(
self.model: CflowModel = CflowModel(
input_size=input_size,
backbone=backbone,
pre_trained=pre_trained,
layers=layers,
fiber_batch_size=fiber_batch_size,
decoder=decoder,
Expand Down
3 changes: 2 additions & 1 deletion anomalib/models/cflow/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ def __init__(
input_size: Tuple[int, int],
backbone: str,
layers: List[str],
pre_trained: bool = True,
fiber_batch_size: int = 64,
decoder: str = "freia-cflow",
condition_vector: int = 128,
Expand All @@ -49,7 +50,7 @@ def __init__(
self.dec_arch = decoder
self.pool_layers = layers

self.encoder = FeatureExtractor(backbone=self.backbone(pretrained=True), layers=self.pool_layers)
self.encoder = FeatureExtractor(backbone=self.backbone(pretrained=pre_trained), layers=self.pool_layers)
self.pool_dims = self.encoder.out_dims
self.decoders = nn.ModuleList(
[
Expand Down
1 change: 1 addition & 0 deletions anomalib/models/dfkde/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ dataset:
model:
name: dfkde
backbone: resnet18
pre_trained: true
max_training_points: 40000
confidence_threshold: 0.5
pre_processing: scale
Expand Down
3 changes: 3 additions & 0 deletions anomalib/models/dfkde/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class Dfkde(AnomalyModule):

Args:
backbone (str): Pre-trained model backbone.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
max_training_points (int, optional): Number of training points to fit the KDE model.
Defaults to 40000.
pre_processing (str, optional): Preprocess features before passing to KDE.
Expand All @@ -47,6 +48,7 @@ class Dfkde(AnomalyModule):
def __init__(
self,
backbone: str,
pre_trained: bool = True,
max_training_points: int = 40000,
pre_processing: str = "scale",
n_components: int = 16,
Expand All @@ -57,6 +59,7 @@ def __init__(

self.model = DfkdeModel(
backbone=backbone,
pre_trained=pre_trained,
n_comps=n_components,
pre_processing=pre_processing,
filter_count=max_training_points,
Expand Down
4 changes: 3 additions & 1 deletion anomalib/models/dfkde/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class DfkdeModel(nn.Module):

Args:
backbone (str): Pre-trained model backbone.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
n_comps (int, optional): Number of PCA components. Defaults to 16.
pre_processing (str, optional): Preprocess features before passing to KDE.
Options are between `norm` and `scale`. Defaults to "scale".
Expand All @@ -43,6 +44,7 @@ class DfkdeModel(nn.Module):
def __init__(
self,
backbone: str,
pre_trained: bool = True,
n_comps: int = 16,
pre_processing: str = "scale",
filter_count: int = 40000,
Expand All @@ -57,7 +59,7 @@ def __init__(
self.threshold_offset = threshold_offset

_backbone = getattr(torchvision.models, backbone)
self.feature_extractor = FeatureExtractor(backbone=_backbone(pretrained=True), layers=["avgpool"]).eval()
self.feature_extractor = FeatureExtractor(backbone=_backbone(pretrained=pre_trained), layers=["avgpool"]).eval()

self.pca_model = PCA(n_components=self.n_components)
self.kde_model = GaussianKDE()
Expand Down
1 change: 1 addition & 0 deletions anomalib/models/dfm/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ dataset:
model:
name: dfm
backbone: resnet18
pre_trained: true
layer: layer3
pooling_kernel_size: 4
pca_level: 0.97
Expand Down
3 changes: 3 additions & 0 deletions anomalib/models/dfm/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ class Dfm(AnomalyModule):
Args:
backbone (str): Backbone CNN network
layer (str): Layer to extract features from the backbone CNN
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
pooling_kernel_size (int, optional): Kernel size to pool features extracted from the CNN.
Defaults to 4.
pca_level (float, optional): Ratio from which number of components for PCA are calculated.
Expand All @@ -48,6 +49,7 @@ def __init__(
self,
backbone: str,
layer: str,
pre_trained: bool = True,
pooling_kernel_size: int = 4,
pca_level: float = 0.97,
score_type: str = "fre",
Expand All @@ -56,6 +58,7 @@ def __init__(

self.model: DFMModel = DFMModel(
backbone=backbone,
pre_trained=pre_trained,
layer=layer,
pooling_kernel_size=pooling_kernel_size,
n_comps=pca_level,
Expand Down
13 changes: 10 additions & 3 deletions anomalib/models/dfm/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,13 +87,20 @@ class DFMModel(nn.Module):
Args:
backbone (str): Pre-trained model backbone.
layer (str): Layer from which to extract features.
pooling_kernel_size (int): Kernel size to pool features extracted from the CNN.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
pooling_kernel_size (int, optional): Kernel size to pool features extracted from the CNN.
n_comps (float, optional): Ratio from which number of components for PCA are calculated. Defaults to 0.97.
score_type (str, optional): Scoring type. Options are `fre` and `nll`. Defaults to "fre".
"""

def __init__(
self, backbone: str, layer: str, pooling_kernel_size: int, n_comps: float = 0.97, score_type: str = "fre"
self,
backbone: str,
layer: str,
pre_trained: bool = True,
pooling_kernel_size: int = 4,
n_comps: float = 0.97,
score_type: str = "fre",
):
super().__init__()
self.backbone = getattr(torchvision.models, backbone)
Expand All @@ -102,7 +109,7 @@ def __init__(
self.pca_model = PCA(n_components=self.n_components)
self.gaussian_model = SingleClassGaussian()
self.score_type = score_type
self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=True), layers=[layer]).eval()
self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=pre_trained), layers=[layer]).eval()

def fit(self, dataset: Tensor) -> None:
"""Fit a pca transformation and a Gaussian model to dataset.
Expand Down
1 change: 1 addition & 0 deletions anomalib/models/fastflow/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ dataset:
model:
name: fastflow
backbone: resnet18 # options: [resnet18, wide_resnet50_2, cait_m48_448, deit_base_distilled_patch16_384]
pre_trained: true
flow_steps: 8 # options: [8, 8, 20, 20] - for each supported backbone
hidden_ratio: 1.0 # options: [1.0, 1.0, 0.16, 0.16] - for each supported backbone
conv3x3_only: True # options: [True, False, False, False] - for each supported backbone
Expand Down
7 changes: 5 additions & 2 deletions anomalib/models/fastflow/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ class Fastflow(AnomalyModule):
Args:
input_size (Tuple[int, int]): Model input size.
backbone (str): Backbone CNN network
flow_steps (int): Flow steps.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
flow_steps (int, optional): Flow steps.
conv3x3_only (bool, optional): Use only conv3x3 in fast_flow model. Defaults to False.
hidden_ratio (float, optional): Ratio to calculate hidden var channels. Defaults to 1.0.
"""
Expand All @@ -33,7 +34,8 @@ def __init__(
self,
input_size: Tuple[int, int],
backbone: str,
flow_steps: int,
pre_trained: bool = True,
flow_steps: int = 8,
conv3x3_only: bool = False,
hidden_ratio: float = 1.0,
):
Expand All @@ -42,6 +44,7 @@ def __init__(
self.model = FastflowModel(
input_size=input_size,
backbone=backbone,
pre_trained=pre_trained,
flow_steps=flow_steps,
conv3x3_only=conv3x3_only,
hidden_ratio=hidden_ratio,
Expand Down
10 changes: 6 additions & 4 deletions anomalib/models/fastflow/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,8 @@ class FastflowModel(nn.Module):
Args:
input_size (Tuple[int, int]): Model input size.
backbone (str): Backbone CNN network
flow_steps (int): Flow steps.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
flow_steps (int, optional): Flow steps.
conv3x3_only (bool, optional): Use only conv3x3 in fast_flow model. Defaults to False.
hidden_ratio (float, optional): Ratio to calculate hidden var channels. Defaults to 1.0.

Expand All @@ -106,7 +107,8 @@ def __init__(
self,
input_size: Tuple[int, int],
backbone: str,
flow_steps: int,
pre_trained: bool = True,
flow_steps: int = 8,
conv3x3_only: bool = False,
hidden_ratio: float = 1.0,
) -> None:
Expand All @@ -115,13 +117,13 @@ def __init__(
self.input_size = input_size

if backbone in ["cait_m48_448", "deit_base_distilled_patch16_384"]:
self.feature_extractor = timm.create_model(backbone, pretrained=True)
self.feature_extractor = timm.create_model(backbone, pretrained=pre_trained)
channels = [768]
scales = [16]
elif backbone in ["resnet18", "wide_resnet50_2"]:
self.feature_extractor = timm.create_model(
backbone,
pretrained=True,
pretrained=pre_trained,
features_only=True,
out_indices=[1, 2, 3],
)
Expand Down
1 change: 1 addition & 0 deletions anomalib/models/padim/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ dataset:
model:
name: padim
backbone: resnet18
pre_trained: true
layers:
- layer1
- layer2
Expand Down
3 changes: 3 additions & 0 deletions anomalib/models/padim/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,20 +41,23 @@ class Padim(AnomalyModule):
layers (List[str]): Layers to extract features from the backbone CNN
input_size (Tuple[int, int]): Size of the model input.
backbone (str): Backbone CNN network
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
"""

def __init__(
self,
layers: List[str],
input_size: Tuple[int, int],
backbone: str,
pre_trained: bool = True,
):
super().__init__()

self.layers = layers
self.model: PadimModel = PadimModel(
input_size=input_size,
backbone=backbone,
pre_trained=pre_trained,
layers=layers,
).eval()

Expand Down
4 changes: 3 additions & 1 deletion anomalib/models/padim/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,20 +39,22 @@ class PadimModel(nn.Module):
input_size (Tuple[int, int]): Input size for the model.
layers (List[str]): Layers used for feature extraction
backbone (str, optional): Pre-trained model backbone. Defaults to "resnet18".
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
"""

def __init__(
self,
input_size: Tuple[int, int],
layers: List[str],
backbone: str = "resnet18",
pre_trained: bool = True,
):
super().__init__()
self.tiler: Optional[Tiler] = None

self.backbone = getattr(torchvision.models, backbone)
self.layers = layers
self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=True), layers=self.layers)
self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=pre_trained), layers=self.layers)
self.dims = DIMS[backbone]
# pylint: disable=not-callable
# Since idx is randomly selected, save it with model to get same results
Expand Down
1 change: 1 addition & 0 deletions anomalib/models/patchcore/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ dataset:
model:
name: patchcore
backbone: wide_resnet50_2
pre_trained: true
layers:
- layer2
- layer3
Expand Down
3 changes: 3 additions & 0 deletions anomalib/models/patchcore/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ class Patchcore(AnomalyModule):
input_size (Tuple[int, int]): Size of the model input.
backbone (str): Backbone CNN network
layers (List[str]): Layers to extract features from the backbone CNN
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
coreset_sampling_ratio (float, optional): Coreset sampling ratio to subsample embedding.
Defaults to 0.1.
num_neighbors (int, optional): Number of nearest neighbors. Defaults to 9.
Expand All @@ -49,6 +50,7 @@ def __init__(
input_size: Tuple[int, int],
backbone: str,
layers: List[str],
pre_trained: bool = True,
coreset_sampling_ratio: float = 0.1,
num_neighbors: int = 9,
) -> None:
Expand All @@ -57,6 +59,7 @@ def __init__(
self.model: PatchcoreModel = PatchcoreModel(
input_size=input_size,
backbone=backbone,
pre_trained=pre_trained,
layers=layers,
num_neighbors=num_neighbors,
)
Expand Down
3 changes: 2 additions & 1 deletion anomalib/models/patchcore/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ def __init__(
input_size: Tuple[int, int],
layers: List[str],
backbone: str = "wide_resnet50_2",
pre_trained: bool = True,
num_neighbors: int = 9,
) -> None:
super().__init__()
Expand All @@ -48,7 +49,7 @@ def __init__(
self.input_size = input_size
self.num_neighbors = num_neighbors

self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=True), layers=self.layers)
self.feature_extractor = FeatureExtractor(backbone=self.backbone(pretrained=pre_trained), layers=self.layers)
self.feature_pooler = torch.nn.AvgPool2d(3, 1, 1)
self.anomaly_map_generator = AnomalyMapGenerator(input_size=input_size)

Expand Down
1 change: 1 addition & 0 deletions anomalib/models/reverse_distillation/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ model:
name: reverse_distillation
lr: 0.005
backbone: wide_resnet50_2
pre_trained: true
layers:
- layer1
- layer2
Expand Down
8 changes: 7 additions & 1 deletion anomalib/models/reverse_distillation/lightning_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ class ReverseDistillation(AnomalyModule):
input_size (Tuple[int, int]): Size of model input
backbone (str): Backbone of CNN network
layers (List[str]): Layers to extract features from the backbone CNN
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
"""

def __init__(
Expand All @@ -49,10 +50,15 @@ def __init__(
lr: float,
beta1: float,
beta2: float,
pre_trained: bool = True,
):
super().__init__()
self.model = ReverseDistillationModel(
backbone=backbone, layers=layers, input_size=input_size, anomaly_map_mode=anomaly_map_mode
backbone=backbone,
pre_trained=pre_trained,
layers=layers,
input_size=input_size,
anomaly_map_mode=anomaly_map_mode,
)
self.loss = ReverseDistillationLoss()
# TODO: LR should be part of optimizer in config.yaml! Since reverse distillation has custom
Expand Down
12 changes: 10 additions & 2 deletions anomalib/models/reverse_distillation/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,15 +36,23 @@ class ReverseDistillationModel(nn.Module):
input_size (Tuple[int, int]): Size of input image
layers (List[str]): Name of layers from which the features are extracted.
anomaly_map_mode (str): Mode used to generate anomaly map. Options are between ``multiply`` and ``add``.
pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone.
"""

def __init__(self, backbone: str, input_size: Tuple[int, int], layers: List[str], anomaly_map_mode: str):
def __init__(
self,
backbone: str,
input_size: Tuple[int, int],
layers: List[str],
anomaly_map_mode: str,
pre_trained: bool = True,
):
super().__init__()
self.tiler: Optional[Tiler] = None

encoder_backbone = getattr(torchvision.models, backbone)
# TODO replace with TIMM feature extractor
self.encoder = FeatureExtractor(backbone=encoder_backbone(pretrained=True), layers=layers)
self.encoder = FeatureExtractor(backbone=encoder_backbone(pretrained=pre_trained), layers=layers)
self.bottleneck = get_bottleneck_layer(backbone)
self.decoder = get_decoder(backbone)

Expand Down
Loading