[perf, data] feat: DP workload balance #202
Workflow file for this run
# # Tests layout
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...
# There are a few folders with `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests designed to run in dedicated environments
# Accelerators for tests
# - By default tests are run with GPU available, except for the ones under `special_npu` and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` name suffix are run on CPU resources in a Linux environment (see the illustrative example at the end of this comment block).
# # Workflow layout
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
#   - `cpu_unit_tests.yml`, run pytest on all scripts with file name pattern `tests/**/test_*_on_cpu.py`
#   - `gpu_unit_tests.yml`, run pytest on all test scripts without the `on_cpu.py` suffix.
#   - Since cpu/gpu unit tests by default run all tests under `tests`, please make sure tests are manually excluded in them when
#     - a new workflow yaml is added to `.github/workflows`
#     - new tests are added to the workflows mentioned in 2.
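#
# Illustrative example of the naming convention (hypothetical file names, not tests that exist in this repo):
# `tests/trainer/test_config_on_cpu.py` would be collected by `cpu_unit_tests.yml` via the
# `tests/**/test_*_on_cpu.py` pattern and run on a CPU-only runner, while `tests/models/test_attention.py`
# (no `on_cpu.py` suffix) would be collected by `gpu_unit_tests.yml` and run with GPUs available; a
# multi-GPU test such as `tests/special_distributed/test_tensor_parallel.py` would have to be excluded
# from both unit-test workflows and wired into its own workflow under `.github/workflows/`.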
name: e2e_ppo_trainer_megatron_vllm_2
on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch.
  # For push, only anti-patterns are specified for now, so the trigger is more
  # conservative and achieves higher coverage.
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!docker/**"
      # Docs
      - "!**/*.md"
      - "!docs/**"
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
      # Entrypoints
      - ".github/workflows/e2e_ppo_trainer_megatron_vllm.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "tests/special_e2e/run_ppo_trainer_megatron.sh"
      - "verl/trainer/main_ppo.py"
      - "verl/trainer/config/ppo_megatron_trainer.yaml"
# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
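# Worked example of the concurrency settings above (ref names shown for illustration only):
# a push to `refs/heads/main` produces group "e2e_ppo_trainer_megatron_vllm_2-refs/heads/main"
# with cancel-in-progress=false, so runs on main are never cancelled; an update to this PR
# (ref `refs/pull/202/merge`) produces group "e2e_ppo_trainer_megatron_vllm_2-refs/pull/202/merge"
# with cancel-in-progress=true, so an older in-flight run for the same PR is cancelled.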
# Declare permissions: just read repository contents.
permissions:
  contents: read
env:
  IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2"
  DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
  TRANSFORMERS_VERSION: "4.56.2"
jobs:
  setup:
    if: github.repository_owner == 'volcengine'
    runs-on: ubuntu-latest
    outputs:
      runner-label: ${{ steps.create-runner.outputs.runner-label }}
      mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
    steps:
      - uses: actions/checkout@v4
      - id: create-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "create"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-image: "${{ env.IMAGE }}"
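  # Note on how the jobs below choose their runner (a reading of the expression as written, not of the
  # runner infrastructure): each job sets `runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]`,
  # so it normally lands on the machine created by the `setup` job above, and falls back to the static
  # self-hosted label 'L20x8' (presumably an 8x L20 GPU box) whenever that output is empty.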
  e2e_ppo_trainer_megatron-deepseek-override-transformer-config:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
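      # Note on the next step (a sketch of the intent; the exact Megatron semantics are not verified here):
      # COMMON_PP=2 splits the 1.3B model into two pipeline stages, and the two
      # `override_transformer_config.account_for_*_in_pipeline_split=true` flags ask Megatron to count the
      # embedding and loss layers as regular layers when partitioning, which helps keep both stages balanced.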
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (DeepSeek)
        run: |
          ray stop --force
          SAVE_FREQ=1 MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct COMMON_PP=2 COMMON_VPP=null bash tests/special_e2e/run_ppo_trainer_megatron.sh +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=true +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=true
      - name: Test Megatron checkpoints merging function (DeepSeek Actor and Critic)
        run: |
          exp_name="deepseek-coder-1.3b-instruct-megatron-gsm8k-minimal"
          python -m verl.model_merger test --backend megatron --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
          python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_1/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-moe-expert-parallel:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install mbridge
          pip3 install transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
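      # Parallelism sketch for the next step (assuming the COMMON_* variables map directly onto Megatron's
      # parallel sizes): the 8 GPUs are laid out as PP(2) x TP(4) x DP(1); inside MoE layers the experts are
      # sharded across EP=4 ranks with ETP=1, and rollout presumably runs one vLLM engine with INFER_TP=8.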
      - name: Running GSM8K E2E training tests with 3D parallelism and expert parallelism on 8 L20 GPUs with Megatron (Qwen MoE)
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_DUMMY_MODEL=True DUMMY_MODEL_CONFIG_PATH=tests/special_e2e/ppo_trainer/expert_parallel/qwen2moe_minimal.json \
          PPO_MAX_TOKEN_LEN=512 FWD_MAX_TOKEN_LEN=512 \
          MAX_PROMPT_LENGTH=256 MAX_RESPONSE_LENGTH=256 \
          MODEL_ID=Qwen/Qwen1.5-MoE-A2.7B-Chat USE_MBRIDGE=True \
          COMMON_PP=2 COMMON_VPP=null COMMON_CP=1 COMMON_TP=4 COMMON_EP=4 COMMON_ETP=1 INFER_TP=8 \
          USE_DIST_CKPT=True ALL_OFFLOAD=True SKIP_SAVE_HF_MODEL=1 bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-qwen2_5vl-3b:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install transformers==$TRANSFORMERS_VERSION
      - name: Prepare Geo3k dataset
        run: |
          python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/
      - name: Prepare dist_ckpt of Qwen2.5-VL-3B (only dist_ckpt is supported for this model)
        run: |
          python3 scripts/converter_hf_to_mcore.py --hf_model_path ${HOME}/models/Qwen/Qwen2.5-VL-3B-Instruct --output_path checkpoints/verl-test/qwen2.5-vl-3b-megatron
      - name: Running Geo3k E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
        run: |
          ray stop --force
          TRAIN_FILES=${HOME}/data/geo3k/train.parquet VAL_FILES=${HOME}/data/geo3k/test.parquet \
          MAX_PROMPT_LENGTH=1024 MAX_RESPONSE_LENGTH=2048 MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct ADV_ESTIMATOR=grpo \
          USE_DYNAMIC_BSZ=False USE_FUSED_KERNELS=True SKIP_SAVE_HF_MODEL=1 \
          COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 COMMON_TP=2 USE_DIST_CKPT=true \
          DIST_CKPT_PATH=checkpoints/verl-test/qwen2.5-vl-3b-megatron bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_vllm:
    needs: setup
    runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test,vllm]
          pip3 install transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      # HF sanity
      # - name: Running GSM8K E2E training tests on 1 L20 GPU with hf for sanity
      #   run: |
      #     ray stop --force
      #     bash tests/special_e2e/ppo_trainer/run_single_gpu.sh
      # # HF sanity
      # - name: Running GSM8K E2E training tests on 1 L20 GPU with engine interface for sanity.
      #   run: |
      #     ray stop --force
      #     bash tests/special_e2e/ppo_trainer/run_single_gpu_with_engine.sh
      # Function RM
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (FSDP_SIZE=8)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp-size8" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm after resuming
        run: |
          ray stop --force
          RESUME_MODE=auto VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp-size8" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging FSDP checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-fsdp-size8"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
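      # Device-mesh sketch for the next step (an interpretation of the FSDP_SIZE knob, not verified against
      # run_function_reward.sh): with 8 GPUs and FSDP_SIZE=4, parameters are presumably sharded within groups
      # of 4 GPUs and replicated across the 2 groups, i.e. a DDP(2) x FSDP(4) layout as the step name says.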
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (DDP_SIZE=2, FSDP_SIZE=4)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True FSDP_SIZE=4 VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-ddp-size2-fsdp-size4" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging DDP+FSDP checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-ddp-size2-fsdp-size4"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (FSDP2)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp2-size8" STRATEGY=fsdp2 bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging FSDP2 checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-fsdp2-size8"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E without rmpad using function rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (GRPO)
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (ReMax)
        run: |
          ray stop --force
          ADV_ESTIMATOR=remax USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using customized reward function
        run: |
          ray stop --force
          CUSTOM_REWARD_FN=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with in-reward kl and kl loss
        run: |
          ray stop --force
          USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      # LoRA tests
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm and layered_summon
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors LAYERED_SUMMON=True TOTAL_TRAIN_STEPS=1 SAVE_FREQ=1 FSDP_SIZE=4 VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test GRPO LoRA checkpoints merging function
        run: |
          export EXP_NAME="qwen2.5-0.5b-function-reward-minimal"
          ls checkpoints/verl-test/${EXP_NAME}/global_step_1/actor
          cat checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/huggingface/config.json
          python3 -m verl.model_merger merge --backend fsdp --local_dir checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/ --target_dir checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm and layered_summon with fsdp2
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors LAYERED_SUMMON=True STRATEGY=fsdp2 bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      # Model RM
      - name: Running GRPO GSM8K E2E training tests with FSDP on 8 L20 GPUs (DeepSeek)
        run: |
          ray stop --force
          MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm
        run: |
          ray stop --force
          bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E without rmpad using model rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and ulysses sp=2
        run: |
          ray stop --force
          SP_SIZE=2 bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and dynamic batch size
        run: |
          ray stop --force
          SEQ_BALANCE=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Liger Kernel enabled
        run: |
          ray stop --force
          LIGER=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Fused Kernel enabled
        run: |
          ray stop --force
          FUSED_KERNELS=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Fused Kernel enabled (Triton backend)
        run: |
          ray stop --force
          FUSED_KERNEL=True FUSED_KERNEL_BACKEND=triton bash tests/special_e2e/ppo_trainer/run_model_reward.sh
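      # Note on the next step (an assumption about why the extra commands are needed, not verified): async
      # rollout mode presumably drives vLLM through Ray actors, hence the explicit `ray start --head` before
      # launching, and `VLLM_USE_V1=1` opts into vLLM's V1 engine for this run.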
      - name: Running GSM8K E2E training tests on vllm async
        run: |
          ray stop --force
          export VLLM_USE_V1=1
          ray start --head
          TOTAL_TRAIN_STEPS=2 ENGINE=vllm ROLLOUT_MODE=async bash tests/special_e2e/ppo_trainer/run_function_reward.sh
  e2e_ppo_trainer_vllm_vlm:
    needs: setup
    runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test,gpu,vllm,geo,trl]
          pip3 install transformers==$TRANSFORMERS_VERSION
      # Geo3k
      - name: Prepare GEO3K dataset
        run: |
          python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/
      - name: Running GEO3K VLM GRPO E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM PPO E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=gae RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
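      # Note on the LoRA knobs in the next step (a reading of the variable names, not verified against the
      # script): LORA_RANK=32 presumably enables rank-32 adapters, while LORA_EXCLUDE=".*visual.*" is a regex
      # that keeps the vision tower of Qwen2-VL out of LoRA wrapping so only the language model is adapted.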
      - name: Running GEO3K VLM GRPO E2E lora training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          LORA_RANK=32 LORA_EXCLUDE=".*visual.*" \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
  cleanup:
    runs-on: ubuntu-latest
    needs:
      [
        setup,
        e2e_ppo_trainer_megatron-deepseek-override-transformer-config,
        e2e_ppo_trainer_megatron-moe-expert-parallel,
        e2e_ppo_trainer_megatron-qwen2_5vl-3b,
        e2e_ppo_trainer_vllm,
        e2e_ppo_trainer_vllm_vlm
      ]
    if: always()
    steps:
      - id: destroy-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "destroy"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"