@@ -390,17 +390,17 @@ def __init__(
         self.vllm_tensor_parallel_size = args.vllm_tensor_parallel_size  # only applies to colocation mode
         self.vllm_importance_sampling_correction = args.vllm_importance_sampling_correction
         self.vllm_importance_sampling_cap = args.vllm_importance_sampling_cap
-        self.use_liger_loss = args.use_liger_loss
+        self.use_liger_kernel = args.use_liger_kernel
         self.loss_type = args.loss_type
         self.scale_rewards = args.scale_rewards
         self.importance_sampling_level = args.importance_sampling_level
         self.mask_truncated_completions = args.mask_truncated_completions
         self.top_entropy_quantile = args.top_entropy_quantile
-        if self.use_liger_loss and self.top_entropy_quantile < 1.0:
+        if self.use_liger_kernel and self.top_entropy_quantile < 1.0:
             raise NotImplementedError(
                 "Liger Kernels don't currently support masking token positions based on entropy."
             )
-        if self.use_liger_loss and not self.importance_sampling_level == "token":
+        if self.use_liger_kernel and not self.importance_sampling_level == "token":
             raise NotImplementedError(
                 "Liger Kernels currently only support token-level importance sampling. Please set"
                 "`importance_sampling_level` to 'token'."
@@ -478,10 +478,10 @@ def __init__(
                 disable_dropout_in_model(self.ref_model)
 
         # Liger loss
-        if self.use_liger_loss:
+        if self.use_liger_kernel:
             if not is_liger_kernel_available():
                 raise ImportError(
-                    "Liger is required to use `liger_loss` as the GRPO loss. Run `pip install liger-kernel`."
+                    "Liger is required to use `use_liger_kernel` as the GRPO loss. Run `pip install liger-kernel`."
                 )
             # redirect the model.module forward to the model forward to ensure pre-forward hooks are called
             self._forward_redirection = _ForwardRedirection()
@@ -1720,7 +1720,7 @@ def compute_liger_loss(self, unwrapped_model, inputs):
     def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
         if return_outputs:
             raise ValueError("The GRPOTrainer does not support returning outputs")
-        if self.use_liger_loss:
+        if self.use_liger_kernel:
             # Compute the loss using the liger grpo loss
             unwrapped_model = self.accelerator.unwrap_model(model)
             return self._forward_redirection(model, unwrapped_model, self.compute_liger_loss, unwrapped_model, inputs)
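
A minimal usage sketch of the renamed flag, assuming it can be set on GRPOConfig and is read by the trainer as shown in the diff above; the output path is a placeholder:

# Hedged sketch, not part of this diff: sets the renamed flag together with the
# constraints enforced by the __init__ checks above. `output_dir` is a placeholder.
from trl import GRPOConfig

training_args = GRPOConfig(
    output_dir="grpo-liger",            # placeholder path
    use_liger_kernel=True,              # renamed from `use_liger_loss` in this change
    importance_sampling_level="token",  # required when the Liger path is enabled
    top_entropy_quantile=1.0,           # entropy-based masking is unsupported with Liger kernels
)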