# Set environment variables during runtime.
ARG CUDA_VER
- ARG DISTRO_ARCH
ARG DISTRO_NAME
ARG DISTRO_VER
- FROM --platform=linux/${DISTRO_ARCH} nvidia/cuda:${CUDA_VER}-devel-${DISTRO_NAME}${DISTRO_VER}
+ FROM --platform=linux/amd64 nvidia/cuda:${CUDA_VER}-devel-${DISTRO_NAME}${DISTRO_VER}

LABEL maintainer="conda-forge <[email protected]>"

- # Set `ARG`s during runtime.
ARG CUDA_VER
- ARG DISTRO_ARCH
ARG DISTRO_NAME
ARG DISTRO_VER
ENV CUDA_VER=${CUDA_VER} \
-     DISTRO_ARCH=${DISTRO_ARCH} \
    DISTRO_NAME=${DISTRO_NAME} \
    DISTRO_VER=${DISTRO_VER}

# Set an encoding to make things work smoothly.
ENV LANG en_US.UTF-8
- ENV LANGUAGE en_US.UTF-8

# Set path to CUDA install (this is a symlink to /usr/local/cuda-${CUDA_VER})
ENV CUDA_HOME /usr/local/cuda

- # bust the docker cache so that we always rerun the installs below
- ADD https://loripsum.net/api /opt/docker/etc/gibberish
-
- # Add qemu in here so that we can use this image on regular linux hosts with qemu user installed
- ADD qemu-aarch64-static /usr/bin/qemu-aarch64-static
- ADD qemu-ppc64le-static /usr/bin/qemu-ppc64le-static
-
# we want to persist a path in ldconfig (to avoid having to always set LD_LIBRARY_PATH), but *after* the existing entries;
# since entries in ld.so.conf.d have precedence over the preconfigured directories, we first add the latter to the former
- # the upstream images all have libcuda.so under $CUDA_HOME/compat;
- # add this to the ldconfig so it will be found correctly.
- # don't forget to update settings by running ldconfig
- RUN ldconfig -v 2>/dev/null | grep -v ^$'\t ' | cut -f1 -d":" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf && \
-     echo "$CUDA_HOME/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf && \
-     ldconfig
+ RUN ldconfig -v 2>/dev/null | grep -v ^$'\t ' | cut -f1 -d":" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf \
+     && if [ ${CUDA_VER} != "9.2" ]; then \
+         # the upstream images for 10.x all have libcuda.so under $CUDA_HOME/compat;
+         # add this to the ldconfig so it will be found correctly.
+         echo "$CUDA_HOME/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf ; \
+     else \
+         # For 9.2, the image nvidia/cuda:9.2-devel-centos6 contains neither
+         # $CUDA_HOME/compat, nor any (non-stub) libcuda.so. We fix this by
+         # adding cuda-compat-10.0 (which is not used for building, but to
+         # test if loading the respective library/package works). However,
+         # due to licensing reasons, these cannot be part of the conda-forge
+         # docker images, but are instead added for CI purposes in:
+         # github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/master/recipe/run_conda_forge_build_setup_linux
+         # Here we only set the ldconfig accordingly.
+         echo "/usr/local/cuda-10.0/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf ; \
+     fi \
+     # don't forget to update settings by running ldconfig
+     && ldconfig
+
+ # bust the docker cache so that we always rerun the installs below
+ ADD https://loripsum.net/api /opt/docker/etc/gibberish
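
Note: a quick way to verify the resulting loader configuration inside a built image (assuming an interactive shell in the container) is to inspect the generated drop-in and the loader cache:

    # show the generated ldconfig drop-in and confirm a compat libcuda is visible to the loader
    cat /etc/ld.so.conf.d/cuda-*.conf
    ldconfig -p | grep libcuda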

# Add the archived repo URL and fix RPM imports
ADD centos7-repos /tmp/centos7-repos
ADD scripts/fix_rpm /opt/docker/bin/fix_rpm
RUN /opt/docker/bin/fix_rpm

- # Add custom `yum_clean_all` script before using `yum`
- COPY scripts/yum_clean_all /opt/docker/bin/
-
# Install basic requirements.
+ COPY scripts/yum_clean_all /opt/docker/bin/
RUN yum update -y --disablerepo=cuda && \
    yum install -y \
        bzip2 \
        sudo \
        tar \
-         which \
-         && \
+         which && \
    /opt/docker/bin/yum_clean_all

- # Fix locale in UBI 8 images
- # See https://github.com/CentOS/sig-cloud-instance-images/issues/154
- RUN if [ "${DISTRO_NAME}${DISTRO_VER}" = "ubi8" ]; then \
-     yum install -y \
-         glibc-langpack-en \
-         && \
-     /opt/docker/bin/yum_clean_all; \
- fi
-
# Remove pre-included system compilers
RUN rpm -e --nodeps --verbose gcc gcc-c++

@@ -85,14 +76,26 @@ RUN source /opt/conda/etc/profile.d/conda.sh && \
    chgrp -R lucky /opt/conda && \
    chmod -R g=u /opt/conda

+ # Symlink CUDA headers that were moved from $CUDA_HOME/include to /usr/include
+ # in CUDA 10.1.
+ RUN for HEADER_FILE in cublas_api.h cublas.h cublasLt.h cublas_v2.h cublasXt.h nvblas.h; do \
+     if [[ ! -f "${CUDA_HOME}/include/${HEADER_FILE}" ]]; \
+     then ln -s "/usr/include/${HEADER_FILE}" "${CUDA_HOME}/include/${HEADER_FILE}" ; \
+     fi; \
+ done
+
+ # Add qemu in here so that we can use this image on regular linux hosts with qemu user installed
+ ADD qemu-aarch64-static /usr/bin/qemu-aarch64-static
+ ADD qemu-ppc64le-static /usr/bin/qemu-ppc64le-static
+
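
Note: the qemu-*-static binaries above only help if the host kernel has binfmt_misc handlers registered for aarch64/ppc64le. One common way to register them on an x86_64 host (done outside this Dockerfile, e.g. in CI setup) is:

    # register qemu user-mode emulation handlers via binfmt_misc
    docker run --rm --privileged multiarch/qemu-user-static --reset -p yes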
# Add a file for users to source to activate the `conda`
# environment `base`. Also add a file that wraps that for
# use with the `ENTRYPOINT`.
COPY linux-anvil-cuda/entrypoint_source /opt/docker/bin/entrypoint_source
COPY scripts/entrypoint /opt/docker/bin/entrypoint

# Ensure that all containers start with tini and the user selected process.
- # Activate the `conda` environment `base`.
+ # Activate the `conda` environment `base` and the devtoolset compiler.
# Provide a default command (`bash`), which will start if the user doesn't specify one.
ENTRYPOINT [ "/opt/conda/bin/tini", "--", "/opt/docker/bin/entrypoint" ]
CMD [ "/bin/bash" ]