[rollout,vllm] fix: Add LoRA Loading to Async vLLM #190
Workflow file for this run
# # Tests layout
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...
# There are a few folders with `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests designed to run in dedicated environments
# Accelerators for tests
# - By default, tests run with GPUs available, except for those under `special_npu` and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` suffix run on CPU resources in a Linux environment.
# # Workflow layout
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always-triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
#   - `cpu_unit_tests.yml`, run pytest on all scripts with the file name pattern `tests/**/test_*_on_cpu.py`
#   - `gpu_unit_tests.yml`, run pytest on all test scripts whose file names do not end with the `on_cpu.py` suffix
#   - Since the cpu/gpu unit tests by default run all tests under `tests`, please make sure tests are manually excluded from them when
#     - a new workflow yaml is added to `.github/workflows`
#     - new tests are added to a workflow mentioned in 2.
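# For reference, the two unit-test workflows are roughly equivalent to running locally
# (a sketch only; the authoritative pytest invocations live in the two yaml files):
#   pytest -s $(find tests -name "test_*_on_cpu.py")                      # cpu_unit_tests.yml
#   pytest -s $(find tests -name "test_*.py" ! -name "*_on_cpu.py")       # gpu_unit_tests.yml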
name: e2e_ppo_trainer_megatron_sglang_2
on:
  # Trigger the workflow on push or pull request,
  # but only for the main and v0.* branches.
  # For push, only anti-patterns are specified for now, so the filter is more
  # conservative and achieves higher coverage.
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!docker/**"
      # Docs
      - "!**/*.md"
      - "!docs/**"
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
      # Entrypoints
- "verl/worksers/rollout/sglang_rollout/*" | |
- ".github/workflows/e2e_ppo_trainer_megatron_sglang.yml" | |
- "examples/data_preprocess/gsm8k.py" | |
- "examples/data_preprocess/geo3k.py" | |
- "tests/special_e2e/run_ppo_trainer_megatron.sh" | |
- "verl/trainer/main_ppo.py" | |
- "verl/trainer/config/ppo_megatron_trainer.yaml" | |
# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
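  # With this expression, in-progress runs are cancelled only for non-main refs;
  # pushes to `main` always run to completion.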
# Declare read-only permissions on repository contents.
permissions:
  contents: read
env:
  IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2"
  DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
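  # Both values are consumed by the `setup` job below: `IMAGE` is the container the
  # dynamic runner boots with, and `DYNAMIC_RUNNER_ENDPOINT` is the FaaS endpoint used
  # to create (and later destroy) that runner.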
jobs:
  setup:
    if: github.repository_owner == 'volcengine'
    runs-on: ubuntu-latest
    outputs:
      runner-label: ${{ steps.create-runner.outputs.runner-label }}
      mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
    steps:
      - uses: actions/checkout@v4
      - id: create-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "create"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-image: "${{ env.IMAGE }}"
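      # The created runner's label and task id are exported as job outputs, so the test
      # jobs can target the dynamic runner via `runs-on` and the `cleanup` job can tear
      # it down afterwards.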
  e2e_ppo_trainer_megatron-moe-expert-parallel:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
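    # If `setup` produced no output (e.g. it was skipped because the repository owner
    # check failed), the `||` falls back to the static `L20x8` runner label.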
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
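      # Hugging Face downloads are routed through the hf-mirror.com mirror; NO_PROXY
      # keeps localhost and mirror traffic off the CI proxy.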
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
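      # `--no-deps` skips dependency resolution: the CI image above is assumed to ship
      # the pinned dependency set, so only verl itself is installed in editable mode.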
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen MoE)
        run: |
          ray stop --force
          MEGATRON_CI_DISABLE_EXPANDABLE_SEGMENTS=1 \
          ADV_ESTIMATOR=grpo USE_DUMMY_MODEL=True DUMMY_MODEL_CONFIG_PATH=tests/special_e2e/ppo_trainer/expert_parallel/qwen2moe_minimal.json \
          PPO_MAX_TOKEN_LEN=512 FWD_MAX_TOKEN_LEN=512 \
          MAX_PROMPT_LENGTH=256 MAX_RESPONSE_LENGTH=256 \
          MODEL_ID=Qwen/Qwen1.5-MoE-A2.7B-Chat \
          ENGINE=sglang COMMON_PP=2 COMMON_VPP=null COMMON_CP=1 COMMON_TP=4 COMMON_EP=4 COMMON_ETP=1 INFER_TP=8 \
          USE_DIST_CKPT=True ALL_OFFLOAD=True SKIP_SAVE_HF_MODEL=1 bash tests/special_e2e/run_ppo_trainer_megatron.sh
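      # A sketch of the knobs above (naming follows Megatron conventions; the script
      # tests/special_e2e/run_ppo_trainer_megatron.sh is the source of truth):
      # COMMON_PP/VPP/CP/TP = pipeline / virtual-pipeline / context / tensor parallel
      # sizes, COMMON_EP/ETP = expert and expert-tensor parallel sizes, and INFER_TP
      # is the tensor parallel size used by the sglang rollout engine.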
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_megatron-qwen2_5vl-3b:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test]
      - name: Prepare Geo3k dataset
        run: |
          python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/
      - name: Prepare dist_ckpt of Qwen2.5-VL-3B, only supports dist_ckpt
        run: |
          python3 scripts/converter_hf_to_mcore.py --hf_model_path ${HOME}/models/Qwen/Qwen2.5-VL-3B-Instruct --output_path checkpoints/verl-test/qwen2.5-vl-3b-megatron
      - name: Running Geo3k E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
        run: |
          ray stop --force
          ENGINE=sglang TRAIN_FILES=${HOME}/data/geo3k/train.parquet VAL_FILES=${HOME}/data/geo3k/test.parquet \
          MAX_PROMPT_LENGTH=1024 MAX_RESPONSE_LENGTH=2048 MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct \
          ADV_ESTIMATOR=grpo USE_DYNAMIC_BSZ=False SKIP_SAVE_HF_MODEL=1 \
          COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 COMMON_TP=2 \
          USE_DIST_CKPT=true DIST_CKPT_PATH=checkpoints/verl-test/qwen2.5-vl-3b-megatron \
          bash tests/special_e2e/run_ppo_trainer_megatron.sh
      - name: clean up
        run: |
          rm -rf checkpoints
  e2e_ppo_trainer_sglang:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,gpu,sglang]
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm and save ckpt
        run: |
          ray stop --force
          ENGINE=sglang bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on sglang async
        run: |
          ray stop --force
          TOTAL_TRAIN_STEPS=2 ENGINE=sglang ROLLOUT_MODE=async bash tests/special_e2e/ppo_trainer/run_function_reward.sh
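      # ROLLOUT_MODE=async presumably switches the rollout to the async server path
      # (run_function_reward.sh is the source of truth for what it toggles);
      # TOTAL_TRAIN_STEPS=2 keeps this async smoke test short.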
  e2e_ppo_trainer_sglang_vlm:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,geo,gpu,sglang] --no-deps
      # Geo3k
      - name: Prepare GEO3K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/
      - name: Running GEO3K VLM E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM E2E with rmpad using torch fused kernel (Qwen2.5-VL)
        run: |
          ray stop --force
          FUSED_KERNELS=True TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM E2E with rmpad using triton fused kernel (Qwen2.5-VL)
        run: |
          ray stop --force
          FUSED_KERNELS=True FUSED_KERNEL_BACKEND=triton \
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
  cleanup:
    runs-on: ubuntu-latest
    needs:
      [
        setup,
        e2e_ppo_trainer_megatron-moe-expert-parallel,
        e2e_ppo_trainer_megatron-qwen2_5vl-3b,
        e2e_ppo_trainer_sglang,
        e2e_ppo_trainer_sglang_vlm
      ]
    if: always()
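    # `always()` guarantees teardown runs even if the test jobs fail or are cancelled,
    # so the dynamic runner created in `setup` is not leaked.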
    steps:
      - id: destroy-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "destroy"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"