
Commit 0c3f29d

Disable logging for quantization (num_bits etc) by default
1 parent: 8c58d98

1 file changed: +23 -2 lines changed

src/sparseml/pytorch/sparsification/quantization/modifier_quantization.py

@@ -103,6 +103,7 @@ class QuantizationModifier(ScheduledModifier):
     |   freeze_bn_stats_epoch: 3.0
     |   model_fuse_fn_name: 'fuse_module'
     |   strict: True
+    |   verbose: False
 
     :param start_epoch: The epoch to start the modifier at
     :param scheme: Default QuantizationScheme to use when enabling quantization
@@ -133,6 +134,8 @@ class QuantizationModifier(ScheduledModifier):
         scheme_overrides or ignore are not found in a given module. Default True
     :param end_epoch: Disabled, setting to anything other than -1 will raise an
         exception. For compatibility with YAML serialization only.
+    :param verbose: if True, will log detailed information such as number of bits,
+        batch norm freezing etc. Default False
     """
 
     def __init__(
@@ -148,6 +151,7 @@ def __init__(
         num_calibration_steps: Optional[int] = None,
         strict: bool = True,
         end_epoch: float = -1.0,
+        verbose: bool = False,
     ):
         raise_if_torch_quantization_not_available()
         if end_epoch != -1:
@@ -178,7 +182,7 @@ def __init__(
         self._model_fuse_fn_name = None
 
         self._strict = strict
-
+        self._verbose = verbose
         self._qat_enabled = False
         self._quantization_observer_disabled = False
         self._bn_stats_frozen = False
@@ -348,6 +352,22 @@ def strict(self, value: bool):
         """
         self._strict = value
 
+    @ModifierProp()
+    def verbose(self) -> bool:
+        """
+        :return: if True, will log detailed information such as number of bits,
+            batch norm freezing etc
+        """
+        return self._verbose
+
+    @verbose.setter
+    def verbose(self, value: bool):
+        """
+        :param value: if True, will log detailed information such as number of bits,
+            batch norm freezing etc.
+        """
+        self._verbose = value
+
     def initialize(
         self,
         module: Module,
@@ -455,7 +475,8 @@ def _check_quantization_update(
             module.apply(freeze_bn_stats)
             self._bn_stats_frozen = True
 
-        self._log_quantization(module, epoch, steps_per_epoch)
+        if self._verbose:
+            self._log_quantization(module, epoch, steps_per_epoch)
 
     def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:
         return (

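For context, a minimal usage sketch (not part of the commit): it shows how the detailed quantization logging could be opted back into from Python after this change. The module path is inferred from the file path above, and only parameters documented in this diff (start_epoch, verbose) are passed, so treat the exact import and signature as assumptions.

# Minimal sketch, assuming the import path matches the file location above.
# verbose=True opts back in to the detailed logging (num_bits, batch norm
# freezing, etc.) that this commit disables by default.
from sparseml.pytorch.sparsification.quantization.modifier_quantization import (
    QuantizationModifier,
)

modifier = QuantizationModifier(
    start_epoch=0.0,  # documented parameter: epoch at which quantization starts
    verbose=True,     # parameter added by this commit; defaults to False
)

In a YAML recipe, the equivalent is setting verbose: True alongside the other modifier fields shown in the docstring sample above.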