Skip to content

Commit 4e9c764

Browse files
Remove itrex dependency for 3x example (#2016)
Signed-off-by: Kaihui-intel <[email protected]> Co-authored-by: Sun, Xuehao <[email protected]>
1 parent a0066d4 commit 4e9c764

File tree

12 files changed

+9
-23
lines changed

12 files changed

+9
-23
lines changed

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/requirements.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,5 @@ transformers
22
torch
33
sentencepiece
44
neural-compressor
5-
intel-extension-for-transformers >= 1.4.1
65
lm-eval==0.4.2
76
peft

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/run_clm_no_trainer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ def get_user_model():
6262
user_model = convert(model=user_model)
6363
user_model.eval()
6464

65-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
65+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
6666
eval_args = LMEvalParser(
6767
model="hf",
6868
user_model=user_model,

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ pytest
88
wandb
99
einops
1010
neural-compressor
11-
intel-extension-for-transformers
12-
lm_eval==0.4.2
11+
lm_eval==0.4.3
1312
peft
1413
optimum-intel

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_clm_no_trainer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ def eval_func(model):
185185
config = AutoConfig.from_pretrained(args.model)
186186
setattr(model, "config", config)
187187

188-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
188+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
189189
eval_args = LMEvalParser(
190190
model="hf",
191191
user_model=model,
@@ -232,7 +232,7 @@ def eval_func(model):
232232

233233
if args.accuracy:
234234
user_model.eval()
235-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
235+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
236236

237237
eval_args = LMEvalParser(
238238
model="hf",

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,5 @@ pytest
88
wandb
99
einops
1010
neural-compressor
11-
intel-extension-for-transformers
12-
lm_eval==0.4.2
11+
lm_eval==0.4.3
1312
peft

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_clm_no_trainer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ def run_fn(model):
212212

213213
if args.accuracy:
214214
user_model.eval()
215-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
215+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
216216
eval_args = LMEvalParser(
217217
model="hf",
218218
user_model=user_model,
@@ -232,7 +232,7 @@ def run_fn(model):
232232

233233
if args.performance:
234234
user_model.eval()
235-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
235+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
236236
import time
237237

238238
samples = args.iters * args.batch_size

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,5 @@ transformers
22
torch
33
sentencepiece
44
neural-compressor
5-
intel-extension-for-transformers >= 1.4.1
6-
lm-eval==0.4.2
5+
lm-eval==0.4.3
76
peft

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_clm_no_trainer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ def get_example_inputs(tokenizer):
116116

117117
if args.accuracy:
118118

119-
from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser
119+
from neural_compressor.evaluation.lm_eval import evaluate, LMEvalParser
120120
eval_args = LMEvalParser(
121121
model="hf",
122122
user_model=user_model,

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/llm_quantization_recipes.md

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,12 +30,6 @@ The scripts [run_generation_sq.py](./run_generation_sq.py) and [run_generation_c
3030

3131
```bash
3232
# Installation
33-
git clone https://github.com/intel/intel-extension-for-transformers.git
34-
35-
# install ITREX
36-
cd intel-extension-for-transformers
37-
pip install -r requirements.txt
38-
pip install -v .
3933

4034
# install requirements
4135
cd examples/huggingface/pytorch/text-generation/quantization

examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/run_generation_cpu_woq.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -293,7 +293,6 @@
293293
_commit_hash=args._commit_hash,
294294
)
295295
elif args.load_in_4bit or args.load_in_8bit:
296-
# CPU device usage is provided by intel-extension-for-transformers.
297296
user_model = AutoModelForCausalLM.from_pretrained(
298297
args.model,
299298
load_in_4bit=args.load_in_4bit,

0 commit comments

Comments (0)