1 parent d607b1a · commit 6c7d87b
swift/llm/tuner.py
@@ -76,6 +76,7 @@ def prepare_model(model, args: SftArguments):
             'rank_pattern': args.lora_rank_pattern,
             'alpha_pattern': args.lora_alpha_pattern,
             'loftq_config': args.lora_loftq_config,
+            'use_rslora': args.use_rslora,
             'use_dora': args.use_dora,
         }
         if args.sft_type == 'lora':
@@ -84,6 +85,7 @@ def prepare_model(model, args: SftArguments):
                 lora_config = LoRAConfig(
                     lora_dtype=args.lora_dtype, **lora_kwargs)
             elif args.tuner_backend == 'peft':
+                assert args.lora_lr_ratio is None, 'Please use tuner_backend="swift" to use LoRA+'
                 lora_config = LoraConfig(
                     task_type='CAUSAL_LM', **lora_kwargs)
             model = Swift.prepare_model(model, lora_config)
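For context on the new flag, here is a minimal sketch of how a `use_rslora`-style option is consumed by peft's `LoraConfig`. The concrete rank, alpha, and target-module values are illustrative assumptions, not values taken from this repository.

```python
# Sketch: enabling rank-stabilized LoRA (rsLoRA) through peft's LoraConfig.
# Assumes peft >= 0.9.0 (use_rslora) and >= 0.10.0 (use_dora); hyperparameter
# values below are placeholders, not defaults from swift/llm/tuner.py.
from peft import LoraConfig

lora_kwargs = {
    'r': 8,
    'lora_alpha': 32,
    'target_modules': ['q_proj', 'v_proj'],
    'use_rslora': True,   # scale adapters by lora_alpha / sqrt(r) instead of lora_alpha / r
    'use_dora': False,
}
lora_config = LoraConfig(task_type='CAUSAL_LM', **lora_kwargs)
# model = get_peft_model(model, lora_config)  # apply to a loaded base model
```

With `use_rslora=True`, peft uses the rank-stabilized scaling factor `lora_alpha / sqrt(r)`, which keeps the adapter's effective update magnitude stable as the rank grows.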