1 parent fa0f793 commit 7f11b97
src/llmcompressor/transformers/sparsification/compressed_tensors_utils.py

@@ -11,7 +11,7 @@
     CompressionFormat,
     ModelCompressor,
     SparsityCompressionConfig,
-    is_module_offloaded,
+    has_offloaded_params,
     update_offload_parameter,
 )
 from loguru import logger
@@ -162,7 +162,7 @@ def patch_tied_tensors_bug(model: torch.nn.Module):
 
         if storage_ptr(input_embed.weight) == storage_ptr(output_embed.weight):
             for module in (input_embed, output_embed):
-                if not is_module_offloaded(module):
+                if not has_offloaded_params(module):
                     # create new storage ptr for onloaded weight
                     untied_data = module.weight.data.clone()
                     module.weight.data = untied_data
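The hunk above renames the offload check (is_module_offloaded to has_offloaded_params) and, for modules that are not offloaded, unties embedding weights by cloning their data. As a rough illustration of why the clone matters, the sketch below is plain PyTorch and not the library's own code; the module names are made up for the example. It mirrors the storage_ptr comparison and the clone in patch_tied_tensors_bug: two parameters that share one storage get separate storage once their data is cloned.

# Minimal sketch, not the library code: two modules whose weights share one
# storage (as tied input/output embeddings do), untied by cloning the data.
import torch

shared = torch.randn(10, 4)
input_embed = torch.nn.Embedding(10, 4)
output_embed = torch.nn.Linear(4, 10, bias=False)
input_embed.weight.data = shared
output_embed.weight.data = shared  # distinct Parameters, one shared storage

def same_storage(a: torch.Tensor, b: torch.Tensor) -> bool:
    # Rough stand-in for the storage_ptr comparison in the diff.
    return a.untyped_storage().data_ptr() == b.untyped_storage().data_ptr()

print(same_storage(input_embed.weight, output_embed.weight))  # True

# Cloning gives each weight its own storage, so compressing or mutating one
# module can no longer silently overwrite the other.
for module in (input_embed, output_embed):
    module.weight.data = module.weight.data.clone()

print(same_storage(input_embed.weight, output_embed.weight))  # False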