Commit 85da44e

move torch imports out of base quant

Satrat committed Oct 17, 2023
1 parent 595281f
Showing 7 changed files with 77 additions and 46 deletions.
40 changes: 0 additions & 40 deletions src/sparseml/modifiers/quantization/base.py
@@ -15,10 +15,6 @@
 from typing import Any, Dict, List, Optional
 
 from sparseml.core import Event, Modifier, State
-from sparseml.modifiers.quantization.utils.quantization_scheme import (
-    QuantizationScheme,
-    QuantizationSchemeLoadable,
-)
 
 
 __all__ = ["QuantizationModifier"]
@@ -57,15 +53,6 @@ class QuantizationModifier(Modifier):
     | model_fuse_fn_name: 'fuse_module'
     | strict: True
-    :param scheme: Default QuantizationScheme to use when enabling quantization
-        in a module. May also be a dictionary to be loaded into the QuantizationScheme
-        class. A string alias may also be used, supported aliases:
-        ['default', 'deepsparse', 'tensorrt'].
-        If None, the default scheme (`QuantizationScheme()`) will be used.
-        Default is None
-    :param scheme_overrides: optional mapping of module type names or submodule type
-        names to quantization schemes to override them with. If a scheme is mapped to
-        'default', then it will use the scheme set in the modifier scheme property
     :param ignore: optional list of module class names or submodule names
         to not quantize. Default is None
     :param disable_quantization_observer_epoch: Epoch to disable updates to the module
@@ -85,8 +72,6 @@ class QuantizationModifier(Modifier):
         scheme_overrides or ignore are not found in a given module. Default True
     """
 
-    scheme: Optional[QuantizationSchemeLoadable] = None
-    scheme_overrides: Optional[Dict[str, QuantizationSchemeLoadable]] = None
     ignore: Optional[List[str]] = None
     disable_quantization_observer_epoch: Optional[float] = None
     freeze_bn_stats_epoch: Optional[float] = None
@@ -98,10 +83,6 @@ class QuantizationModifier(Modifier):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.scheme = QuantizationScheme.load(self.scheme)
-        self.scheme_overrides = _load_quantization_schemes_dict(
-            self.scheme_overrides, self.scheme
-        )
         if self.model_fuse_fn_kwargs is None:
             self.model_fuse_fn_kwargs = {}
         if self.ignore is None:
@@ -158,24 +139,3 @@ def check_should_disable_observer(self, event: Event) -> bool:
 
     def on_initialize_structure(self, state: State, **kwargs):
         pass # nothing needed for this modifier
-
-
-class _QuantizationSchemesDict(dict):
-    # wrapper class for dict to override the __str__ method for yaml serialization
-
-    def __str__(self):
-        return str({submodule: scheme.dict() for submodule, scheme in self.items()})
-
-
-def _load_quantization_schemes_dict(
-    schemes_dict: Optional[Dict[str, QuantizationSchemeLoadable]],
-    default_scheme: QuantizationScheme,
-) -> Dict[str, QuantizationScheme]:
-    if schemes_dict is None:
-        return {}
-    return _QuantizationSchemesDict(
-        {
-            submodule: QuantizationScheme.load(scheme, default=default_scheme)
-            for submodule, scheme in schemes_dict.items()
-        }
-    )
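With the scheme imports, fields, and helpers gone, the framework-agnostic base module no longer touches torch at import time. A minimal sketch of the intended effect, assuming (this is not verified by the diff) that neither sparseml.core nor the quantization package __init__ imports torch as a side effect:

import sys

# importing the framework-agnostic base should no longer require torch;
# this only holds if nothing else in the import chain loads torch
from sparseml.modifiers.quantization.base import QuantizationModifier

assert "torch" not in sys.modules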
50 changes: 49 additions & 1 deletion src/sparseml/modifiers/quantization/pytorch.py
@@ -14,7 +14,7 @@
 
 import logging
 from itertools import cycle
-from typing import Any, Callable
+from typing import Any, Callable, Dict, Optional
 
 import torch
 from torch.nn import Module
@@ -26,6 +26,10 @@
     freeze_bn_stats,
     fuse_module_conv_bn_relus,
 )
+from sparseml.modifiers.quantization.utils.quantization_scheme import (
+    QuantizationScheme,
+    QuantizationSchemeLoadable,
+)
 from sparseml.modifiers.quantization.utils.quantize import (
     convert_module_qat_from_schemes,
     raise_if_torch_quantization_not_available,
@@ -38,12 +42,35 @@
 
 
 class QuantizationModifierPyTorch(QuantizationModifier):
+    """
+    Pytorch-specific implementation of quantization modifier
+
+    :param scheme: Default QuantizationScheme to use when enabling quantization
+        in a module. May also be a dictionary to be loaded into the QuantizationScheme
+        class. A string alias may also be used, supported aliases:
+        ['default', 'deepsparse', 'tensorrt'].
+        If None, the default scheme (`QuantizationScheme()`) will be used.
+        Default is None
+    :param scheme_overrides: optional mapping of module type names or submodule type
+        names to quantization schemes to override them with. If a scheme is mapped to
+        'default', then it will use the scheme set in the modifier scheme property
+    """
+
+    scheme: Optional[QuantizationSchemeLoadable] = None
+    scheme_overrides: Optional[Dict[str, QuantizationSchemeLoadable]] = None
     calibration_dataloader_: Any = None
     calibration_function_: Any = None
     qat_enabled_: bool = False
     quantization_observer_disabled_: bool = False
     bn_stats_frozen_: bool = False
 
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.scheme = QuantizationScheme.load(self.scheme)
+        self.scheme_overrides = _load_quantization_schemes_dict(
+            self.scheme_overrides, self.scheme
+        )
+
     def on_initialize(self, state: State, **kwargs) -> bool:
         raise_if_torch_quantization_not_available()
         if self.end and self.end != -1:
@@ -181,3 +208,24 @@ def _calibrate(self, module: Module):
             module.train()
         else:
             self._disable_quantization_observer(module)
+
+
+class _QuantizationSchemesDict(dict):
+    # wrapper class for dict to override the __str__ method for yaml serialization
+
+    def __str__(self):
+        return str({submodule: scheme.dict() for submodule, scheme in self.items()})
+
+
+def _load_quantization_schemes_dict(
+    schemes_dict: Optional[Dict[str, QuantizationSchemeLoadable]],
+    default_scheme: QuantizationScheme,
+) -> Dict[str, QuantizationScheme]:
+    if schemes_dict is None:
+        return {}
+    return _QuantizationSchemesDict(
+        {
+            submodule: QuantizationScheme.load(scheme, default=default_scheme)
+            for submodule, scheme in schemes_dict.items()
+        }
+    )
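For context, a short sketch of the relocated scheme handling from the PyTorch side. The field names come from the diff above, but the constructor call is illustrative: any additional arguments the Modifier base requires are omitted here.

from sparseml.modifiers.quantization.pytorch import QuantizationModifierPyTorch

# scheme accepts a QuantizationScheme, a dict to load into one, or a string
# alias ('default', 'deepsparse', 'tensorrt'); scheme_overrides maps module
# or submodule type names to schemes, with 'default' resolving to `scheme`
modifier = QuantizationModifierPyTorch(
    scheme="deepsparse",
    scheme_overrides={"Linear": "default"},
)

# __init__ resolves both fields eagerly: `scheme` becomes a QuantizationScheme
# and `scheme_overrides` a _QuantizationSchemesDict, whose __str__ renders the
# schemes as plain dicts so they serialize cleanly to YAML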
13 changes: 13 additions & 0 deletions tests/sparseml/modifiers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
tests/sparseml/pytorch/modifiers/conf.py → tests/sparseml/modifiers/conf.py: file renamed without changes.
13 changes: 13 additions & 0 deletions tests/sparseml/modifiers/quantization/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
@@ -16,7 +16,7 @@
 from sparseml.core.factory import ModifierFactory
 from sparseml.core.framework import Framework
 from sparseml.modifiers.quantization import QuantizationModifier
-from tests.sparseml.pytorch.modifiers.conf import setup_modifier_factory
+from tests.sparseml.modifiers.conf import setup_modifier_factory
 
 
 def test_quantization_registered():
@@ -23,11 +23,8 @@
     is_qat_helper_module,
     is_quantizable_module,
 )
+from tests.sparseml.modifiers.conf import LifecyleTestingHarness, setup_modifier_factory
 from tests.sparseml.pytorch.helpers import ConvNet, LinearNet
-from tests.sparseml.pytorch.modifiers.conf import (
-    LifecyleTestingHarness,
-    setup_modifier_factory,
-)
 from tests.sparseml.pytorch.sparsification.quantization.test_modifier_quantization import ( # noqa E501
     _match_submodule_name_or_type,
     _test_qat_wrapped_module,
