Skip to content

Commit cc368a8

Browse files
authored
fix issue where the IPEX prepared model does not support deepcopy, and add a unit test (#1174)
1 parent 135e52f commit cc368a8

File tree

2 files changed

+34
-11
lines changed

2 files changed

+34
-11
lines changed

neural_compressor/adaptor/pytorch.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2362,8 +2362,8 @@ def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops):
23622362
assert isinstance(model, torch.nn.Module), \
23632363
"The model passed in is not the instance of torch.nn.Module"
23642364

2365-
model_ = copy.deepcopy(model)
23662365
if not IPEX_110 and not IPEX_112:
2366+
model_ = copy.deepcopy(model)
23672367
model_.eval().to(ipex.DEVICE)
23682368
try:
23692369
init_model = torch.jit.script(model_)
@@ -2377,10 +2377,15 @@ def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops):
23772377
"Fail to convert this model to PyTorch Script model"
23782378
)
23792379
init_model = model_
2380+
elif IPEX_110:
2381+
init_model = copy.deepcopy(model)
2382+
init_model.eval()
23802383
else:
2381-
model_.eval()
2382-
init_model = model_
2383-
2384+
if hasattr(model,'save_qconf_summary'):
2385+
init_model = ipex.quantization._quantize_utils.copy_prepared_model(model)
2386+
else:
2387+
init_model = copy.deepcopy(model)
2388+
init_model.eval()
23842389
# create a quantization config file for intel pytorch extension model
23852390
os.makedirs(os.path.dirname(self.ipex_config_path), exist_ok=True)
23862391
if not IPEX_110 and not IPEX_112:

test/ipex/test_adaptor_ipex.py

Lines changed: 25 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,9 @@
11
import torch
22
import unittest
33
import os
4-
from neural_compressor.adaptor import FRAMEWORKS
5-
from neural_compressor.model import MODELS
64
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
75
import neural_compressor.adaptor.pytorch as nc_torch
86
from neural_compressor.experimental import Quantization, common
9-
from neural_compressor.conf.config import QuantConf
10-
from neural_compressor.utils.pytorch import load
11-
from neural_compressor.utils.utility import recover
127
import shutil
138
import copy
149
import numpy as np
@@ -20,6 +15,7 @@
2015
except:
2116
TEST_IPEX = False
2217

18+
torch.manual_seed(9527)
2319
assert TEST_IPEX, "Please install intel extension for pytorch"
2420
# get torch and IPEX version
2521
PT_VERSION = nc_torch.get_torch_version()
@@ -89,7 +85,7 @@ def test_tuning_ipex(self):
8985
ipex_conf = ipex.quantization.QuantConf(
9086
configure_file="./saved/best_configure.json",
9187
)
92-
q_model = ipex.quantization.convert(model, ipex_conf, torch.randn(1, 3, 224, 224))
88+
q_model = ipex.quantization.convert(model, ipex_conf, torch.ones(1, 3, 224, 224))
9389
from neural_compressor.experimental import Benchmark
9490
evaluator = Benchmark('ipex_yaml.yaml')
9591
evaluator.model = q_model
@@ -121,7 +117,7 @@ def test_tuning_ipex(self):
121117
nc_model = quantizer.fit()
122118
nc_model.save('./saved')
123119
qconfig = ipex.quantization.default_static_qconfig
124-
prepared_model = ipex.quantization.prepare(model, qconfig, example_inputs=torch.randn(1, 3, 224, 224), inplace=False)
120+
prepared_model = ipex.quantization.prepare(model, qconfig, example_inputs=torch.ones(1, 3, 224, 224), inplace=False)
125121
prepared_model.load_qconf_summary(qconf_summary = "./saved/best_configure.json")
126122
convert_model = ipex.quantization.convert(prepared_model)
127123
from neural_compressor.experimental import Benchmark
@@ -130,5 +126,27 @@ def test_tuning_ipex(self):
130126
evaluator.b_dataloader = common.DataLoader(dataset)
131127
evaluator.fit('accuracy')
132128

129+
def test_tuning_ipex_for_ipex_autotune_func(self):
130+
from neural_compressor.experimental import Quantization
131+
model = M()
132+
qconfig = ipex.quantization.default_static_qconfig
133+
prepared_model = ipex.quantization.prepare(model, qconfig, example_inputs=torch.ones(1, 3, 224, 224), inplace=False)
134+
quantizer = Quantization('ipex_yaml.yaml')
135+
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
136+
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
137+
quantizer.model = prepared_model
138+
quantizer.calib_dataloader = common.DataLoader(dataset)
139+
quantizer.eval_dataloader = common.DataLoader(dataset)
140+
nc_model = quantizer.fit()
141+
nc_model.save('./saved')
142+
qconfig = ipex.quantization.default_static_qconfig
143+
prepared_model = ipex.quantization.prepare(model, qconfig, example_inputs=torch.ones(1, 3, 224, 224), inplace=False)
144+
prepared_model.load_qconf_summary(qconf_summary = "./saved/best_configure.json")
145+
convert_model = ipex.quantization.convert(prepared_model)
146+
from neural_compressor.experimental import Benchmark
147+
evaluator = Benchmark('ipex_yaml.yaml')
148+
evaluator.model = convert_model
149+
evaluator.b_dataloader = common.DataLoader(dataset)
150+
evaluator.fit('accuracy')
133151
if __name__ == "__main__":
134152
unittest.main()

0 commit comments

Comments
 (0)