[rollout,vllm] fix: Add LoRA Loading to Async vLLM #2641
Workflow file for this run
# # Tests layout
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...
# There are a few folders with the `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests designed to run in dedicated environments
# # Accelerators for tests
# - By default, tests run with GPUs available, except for those under `special_npu` and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` suffix are run on CPU resources in a Linux environment.
# # Workflow layout
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always-triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
#    - `cpu_unit_tests.yml`: runs pytest on all scripts matching the file name pattern `tests/**/test_*_on_cpu.py`
#    - `gpu_unit_tests.yml`: runs pytest on all test scripts without the `on_cpu.py` suffix
#    - Since the cpu/gpu unit tests by default run everything under `tests`, please make sure new tests are manually excluded from them (as sketched below) when
#      - a new workflow yaml is added to `.github/workflows`
#      - new tests are added to the workflows mentioned in 2.
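# (Illustrative note, not the exact mechanism: excluding usually means adding the new test
# paths to the skip list of `cpu_unit_tests.yml` / `gpu_unit_tests.yml`, e.g. via a pytest
# `--ignore=tests/<new_suite>` style option; check those workflow files for the details.)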
name: e2e_ppo_trainer_megatron_vllm
on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch.
  # For push, for now only anti-patterns are specified so it is more conservative
  # and achieves higher coverage.
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!docker/**"
      # Docs
      - "!**/*.md"
      - "!docs/**"
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
      # Entrypoints
      - ".github/workflows/e2e_ppo_trainer_megatron_vllm.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "tests/special_e2e/run_ppo_trainer_megatron.sh"
      - "verl/trainer/main_ppo.py"
      - "verl/trainer/config/ppo_megatron_trainer.yaml"
# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
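  # The expression above evaluates to false on `main`, so in-flight runs on main are
  # never cancelled; only PR/branch runs get superseded by newer pushes.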
# Declare read-only permissions (contents only).
permissions:
  contents: read
env:
  IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2"
  DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
  TRANSFORMERS_VERSION: "4.56.2"
jobs:
  setup:
    if: github.repository_owner == 'volcengine'
    runs-on: ubuntu-latest
    outputs:
      runner-label: ${{ steps.create-runner.outputs.runner-label }}
      mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
    steps:
      - uses: actions/checkout@v4
      - id: create-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "create"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-image: "${{ env.IMAGE }}"
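      # Downstream jobs consume `runner-label` through their `runs-on` expression and
      # fall back to the static 'L20x8' label when the dynamic runner output is empty.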
  e2e_ppo_trainer_megatron-deepseek:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install math-verify transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron, use mbridge e2e to pre-load and save (DeepSeek)
        run: |
          ray stop --force
          ALL_OFFLOAD=True SAVE_FREQ=1 MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 USE_MBRIDGE=True USE_DIST_CKPT=False \
            bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron, use mbridge and auto-resume from the saved checkpoint (DeepSeek)
        run: |
          ray stop --force
          RESUME_MODE=auto MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct TOTAL_TRAIN_STEPS=2 SAVE_FREQ=1 COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 USE_MBRIDGE=True USE_DIST_CKPT=False \
            bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron in async rollout mode (DeepSeek)
        run: |
          ray stop --force
          export VLLM_USE_V1=1
          ray start --head
          MODE=async USE_FUSED_KERNELS=True MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct TOTAL_TRAIN_STEPS=2 SAVE_FREQ=2 bash tests/special_e2e/run_ppo_trainer_megatron.sh
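      # MODE=async exercises the async vLLM rollout path (hence VLLM_USE_V1=1 and the
      # explicit `ray start --head` above); presumably this is the code path covered by
      # this PR's LoRA-loading fix for async vLLM.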
      - name: Test Megatron checkpoints merging function (DeepSeek Actor and Critic)
        run: |
          exp_name="deepseek-coder-1.3b-instruct-megatron-gsm8k-minimal"
          python -m verl.model_merger test --backend megatron --local_dir checkpoints/verl-test/${exp_name}/global_step_2/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_2/actor/huggingface
          python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_2/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_2/critic/huggingface
      - name: Test Megatron distributed checkpoints merging function (DeepSeek)
        run: |
          exp_name="deepseek-coder-1.3b-instruct-megatron-gsm8k-minimal"
          torchrun --nproc_per_node 4 --nnodes 1 -m verl.model_merger merge --backend megatron --local_dir checkpoints/verl-test/${exp_name}/global_step_2/actor --target_dir checkpoints/verl-test/${exp_name}/global_step_2/actor/hf_model
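      # The `test` subcommand above checks merged Megatron shards against the saved
      # huggingface reference dir, while `merge` (run here under torchrun across 4 ranks)
      # writes a standalone HF model to `hf_model`.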
      - name: Running GRPO GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (DeepSeek)
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_DYNAMIC_BSZ=False MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-qwen3:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install math-verify transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen3) with validation and saving
        run: |
          ray stop --force
          ALL_OFFLOAD=True VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 MODEL_ID=Qwen/Qwen3-0.6B bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen3) testing learning rate scheduler
        run: |
          ray stop --force
          LR_WARMUP_STEPS=1 TOTAL_TRAIN_STEPS=2 MODEL_ID=Qwen/Qwen3-0.6B bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: Test Megatron checkpoints merging function (Qwen3 Actor and Critic)
        run: |
          exp_name="qwen3-0.6b-megatron-gsm8k-minimal"
          python -m verl.model_merger test --backend megatron --tie-word-embedding --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
          python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_1/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-different-train-infer-tp-qwen-tie-embedding:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install math-verify transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with tie-embedding Megatron (Qwen) with train tp > infer tp
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 TRAIN_TP=2 INFER_TP=1 MODEL_ID=Qwen/Qwen2.5-1.5B bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen) with train tp < infer tp
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 TRAIN_TP=1 INFER_TP=2 ALL_OFFLOAD=True MODEL_ID=Qwen/Qwen2.5-1.5B bash tests/special_e2e/run_ppo_trainer_megatron.sh
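      # Mismatched TRAIN_TP/INFER_TP settings exercise weight resharding between the
      # Megatron training shards and the vLLM inference engine in both directions.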
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-qwen-override-transformer-config:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
          pip3 install math-verify transformers==$TRANSFORMERS_VERSION
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      # - name: Download Model to Use
      #   run: |
      #     huggingface-cli download Qwen/Qwen2.5-0.5B --local-dir ${HOME}/models/Qwen/Qwen2.5-0.5B
      #     export HF_HUB_OFFLINE=1
      - name: Prepare dist_ckpt of Qwen2.5-0.5B (uneven layer distribution is only supported with dist_ckpt)
        run: |
          python3 scripts/converter_hf_to_mcore.py --hf_model_path ${HOME}/models/Qwen/Qwen2.5-0.5B --output_path checkpoints/verl-test/qwen2.5-0.5b-megatron
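      # converter_hf_to_mcore.py writes the Megatron dist_ckpt that the next step
      # consumes via DIST_CKPT_PATH.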
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
        run: |
          ray stop --force
          SAVE_FREQ=1 COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 SKIP_SAVE_HF_MODEL=1 USE_DIST_CKPT=True DIST_CKPT_PATH=checkpoints/verl-test/qwen2.5-0.5b-megatron \
            bash tests/special_e2e/run_ppo_trainer_megatron.sh +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=8 +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=4
          cp -r checkpoints checkpoints-dut
          SAVE_FREQ=1 COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 bash tests/special_e2e/run_ppo_trainer_megatron.sh
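      # The first run (uneven pipeline split: 8 layers in the first stage, 4 in the last)
      # is copied to checkpoints-dut; the second, default-config run produces the
      # huggingface reference that the merging test below compares against.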
      - name: Test Megatron checkpoints merging function (Qwen Actor and Critic)
        run: |
          exp_name="qwen2.5-0.5b-megatron-gsm8k-minimal"
          python -m verl.model_merger test --backend megatron --tie-word-embedding --local_dir checkpoints-dut/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
          python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints-dut/verl-test/${exp_name}/global_step_1/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface
      - name: clean up
        run: |
          rm -rf checkpoints
  cleanup:
    runs-on: ubuntu-latest
    needs:
      [
        setup,
        e2e_ppo_trainer_megatron-deepseek,
        e2e_ppo_trainer_megatron-qwen3,
        e2e_ppo_trainer_megatron-different-train-infer-tp-qwen-tie-embedding,
        e2e_ppo_trainer_megatron-qwen-override-transformer-config,
      ]
    if: always()
    steps:
      - id: destroy-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "destroy"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"