Skip to content

Commit dcfe952

Browse files
authored
Update Dockerfile to build for Blackwell (#18095)
1 parent 48ac2be commit dcfe952

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

docker/Dockerfile

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -77,7 +77,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
7777
# can be useful for both `dev` and `test`
7878
# explicitly set the list to avoid issues with torch 2.2
7979
# see https://github.com/pytorch/pytorch/pull/123243
80-
ARG torch_cuda_arch_list='7.0 7.5 8.0 8.6 8.9 9.0+PTX'
80+
ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0+PTX'
8181
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
8282
# Override the arch list for flash-attn to reduce the binary size
8383
ARG vllm_fa_cmake_gpu_arches='80-real;90-real'
@@ -257,8 +257,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
257257
if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
258258
# uv pip install --system https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.4/flashinfer_python-0.2.4+cu124torch2.6-cp38-abi3-linux_x86_64.whl ; \
259259
# TESTING: install FlashInfer from source to test 2.7.0 final RC
260-
FLASHINFER_ENABLE_AOT=1 TORCH_CUDA_ARCH_LIST='7.5 8.0 8.6 8.9 9.0+PTX' \
261-
uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@v0.2.4" ; \
260+
FLASHINFER_ENABLE_AOT=1 TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0 10.0+PTX' \
261+
uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@e00e8cedbfcb220f328fd36aa8f529f869b01e6b" ; \
262262
fi
263263
COPY examples examples
264264
COPY benchmarks benchmarks

0 commit comments

Comments (0)