🎬 Clip higher #3118

Merged · 11 commits · Mar 20, 2025
10 changes: 10 additions & 0 deletions trl/trainer/grpo_config.py
@@ -115,6 +115,9 @@ class GRPOConfig(TrainingArguments):
             Number of iterations per batch (denoted as μ in the algorithm).
         epsilon (`float`, *optional*, defaults to `0.2`):
             Epsilon value for clipping.
+        epsilon_high (`float` or `None`, *optional*, defaults to `None`):
+            Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as the lower
+            bound given by `epsilon`. The [DAPO](https://huggingface.co/papers/2503.14476) paper recommends `0.28`.
         reward_weights (`list[float]` or `None`, *optional*, defaults to `None`):
             Weights for each reward function. Must match the number of reward functions. If `None`, all rewards are
             weighted equally with weight `1.0`.
@@ -300,6 +303,13 @@ class GRPOConfig(TrainingArguments):
         default=0.2,
         metadata={"help": "Epsilon value for clipping."},
     )
+    epsilon_high: Optional[float] = field(
+        default=None,
+        metadata={
+            "help": "Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as "
+            "the lower bound given by `epsilon`. The DAPO paper recommends `0.28`."
+        },
+    )
     reward_weights: Optional[list[float]] = field(
         default=None,
         metadata={
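For context, a minimal sketch of how the new option could be enabled from user code once this change lands. Only `epsilon` and `epsilon_high` come from this diff; `output_dir` is a standard `TrainingArguments` field, and its value here is a placeholder.

from trl import GRPOConfig

# Enable "clip higher": keep the usual lower bound at 0.2 but widen the upper
# bound to 0.28, the value recommended by the DAPO paper.
config = GRPOConfig(
    output_dir="grpo-clip-higher",  # placeholder output directory
    epsilon=0.2,                    # lower-bound epsilon (existing argument)
    epsilon_high=0.28,              # new upper-bound epsilon; falls back to `epsilon` when None
)

Leaving `epsilon_high=None` reproduces the previous symmetric clipping behaviour.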
5 changes: 3 additions & 2 deletions trl/trainer/grpo_trainer.py
@@ -388,7 +388,8 @@ def data_collator(features): # No data collation is needed in GRPO

         # Multi-step
         self.num_iterations = args.num_iterations  # = 𝜇 in the GRPO paper
-        self.epsilon = args.epsilon
+        self.epsilon_low = args.epsilon
+        self.epsilon_high = args.epsilon_high if args.epsilon_high is not None else args.epsilon
         # Tracks the number of iterations (forward + backward passes), including those within a gradient accumulation cycle.
         self._step = 0
         # Buffer the batch to reuse generated outputs across multiple updates. For more details, see
@@ -975,7 +976,7 @@ def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=N
         # _generate_and_score_completions) and use per_token_logps.detach() instead.
         old_per_token_logps = inputs["old_per_token_logps"] if self.num_iterations > 1 else per_token_logps.detach()
         coef_1 = torch.exp(per_token_logps - old_per_token_logps)
-        coef_2 = torch.clamp(coef_1, 1 - self.epsilon, 1 + self.epsilon)
+        coef_2 = torch.clamp(coef_1, 1 - self.epsilon_low, 1 + self.epsilon_high)
         per_token_loss1 = coef_1 * advantages.unsqueeze(1)
         per_token_loss2 = coef_2 * advantages.unsqueeze(1)
         per_token_loss = -torch.min(per_token_loss1, per_token_loss2)
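To see what the trainer change does numerically, here is a small self-contained sketch of the asymmetric clipped surrogate computed in compute_loss above; the tensors and epsilon values are toy placeholders, not part of the PR.

import torch

# Toy per-token log-probabilities and advantages (one sequence of 4 tokens).
per_token_logps = torch.tensor([[-1.2, -0.8, -2.0, -0.5]])
old_per_token_logps = torch.tensor([[-1.0, -1.0, -1.5, -0.7]])
advantages = torch.tensor([0.6])  # one advantage per sequence

epsilon_low, epsilon_high = 0.2, 0.28  # asymmetric bounds, per DAPO

coef_1 = torch.exp(per_token_logps - old_per_token_logps)        # importance ratio
coef_2 = torch.clamp(coef_1, 1 - epsilon_low, 1 + epsilon_high)  # "clip higher" bound
per_token_loss1 = coef_1 * advantages.unsqueeze(1)
per_token_loss2 = coef_2 * advantages.unsqueeze(1)
per_token_loss = -torch.min(per_token_loss1, per_token_loss2)
print(per_token_loss)

Because the upper clip bound is larger than the lower one, tokens whose probability increased can contribute more to the update before being clipped, which is the "clip higher" idea from DAPO.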