# `ARG`s declared before `FROM` are build-time parameters used only to select
# the base image below; they are NOT runtime environment variables
# (they are redeclared inside the stage further down to persist them).
ARG CUDA_VER
ARG DISTRO_ARCH
ARG DISTRO_NAME
ARG DISTRO_VER
FROM --platform=linux/${DISTRO_ARCH} nvidia/cuda:${CUDA_VER}-devel-${DISTRO_NAME}${DISTRO_VER}
# NOTE(review): the maintainer address was redacted to "[email protected]" by the
# page scrape this file was recovered from — restore the real address.
LABEL maintainer="conda-forge <[email protected]>"
# Redeclare the `ARG`s inside the build stage (an `ARG` declared before `FROM`
# is only visible on `FROM` lines) and persist them into the runtime
# environment so downstream tooling can read them.
ARG CUDA_VER
ARG DISTRO_ARCH
ARG DISTRO_NAME
ARG DISTRO_VER
ENV CUDA_VER=${CUDA_VER} \
    DISTRO_ARCH=${DISTRO_ARCH} \
    DISTRO_NAME=${DISTRO_NAME} \
    DISTRO_VER=${DISTRO_VER}
# Set an encoding to make things work smoothly.
# (key=value form: the legacy space-separated `ENV` syntax is deprecated)
ENV LANG=en_US.UTF-8 \
    LANGUAGE=en_US.UTF-8
# Set path to CUDA install (this is a symlink to /usr/local/cuda-${CUDA_VER})
ENV CUDA_HOME=/usr/local/cuda
22
- # we want to persist a path in ldconfig (to avoid having to always set LD_LIBRARY_PATH), but *after* the existing entries;
23
- # since entries in ld.so.conf.d have precedence before the preconfigured directories, we first add the latter to the former
24
- RUN ldconfig -v 2>/dev/null | grep -v ^$'\t ' | cut -f1 -d":" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf \
25
- && if [ ${CUDA_VER} != "9.2" ]; then \
26
- # the upstream images for 10.x all have libcuda.so under $CUDA_HOME/compat;
27
- # add this to the ldconfig so it will be found correctly.
28
- echo "$CUDA_HOME/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf ; \
29
- else \
30
- # For 9.2, the image nvidia/cuda:9.2-devel-centos6 contains neither
31
- # $CUDA_HOME/compat, nor any (non-stub) libcuda.so. We fix this by
32
- # adding cuda-compat-10.0 (which is not used for building, but to
33
- # test if loading the respective library/package works). However,
34
- # due to licensing reasons, these cannot be part of the conda-forge
35
- # docker images, but are instead added for CI purposes in:
36
- # github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/master/recipe/run_conda_forge_build_setup_linux
37
- # Here we only set the ldconfig accordingly.
38
- echo "/usr/local/cuda-10.0/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf ; \
39
- fi \
40
- # don't forget to update settings by running ldconfig
41
- && ldconfig
42
-
43
# Bust the docker cache so that we always rerun the installs below.
# NOTE(review): bare `ADD <url>` is normally an anti-pattern (hadolint DL3020),
# but here the ever-changing remote content is exactly what invalidates the
# cache on every build — keep it.
ADD https://loripsum.net/api /opt/docker/etc/gibberish
# Add qemu in here so that we can use this image on regular linux hosts with
# qemu user installed. (`COPY`, not `ADD`: these are plain local files and
# need no auto-extraction — hadolint DL3020.)
COPY qemu-aarch64-static /usr/bin/qemu-aarch64-static
COPY qemu-ppc64le-static /usr/bin/qemu-ppc64le-static

# We want to persist a path in ldconfig (to avoid having to always set
# LD_LIBRARY_PATH), but *after* the existing entries; since entries in
# ld.so.conf.d take precedence over the preconfigured directories, we first
# append the latter to the former. The upstream images all ship libcuda.so
# under $CUDA_HOME/compat, so register that path too, then refresh the cache
# by running ldconfig.
RUN ldconfig -v 2>/dev/null | grep -v ^$'\t ' | cut -f1 -d":" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf && \
    echo "$CUDA_HOME/compat" >> /etc/ld.so.conf.d/cuda-$CUDA_VER.conf && \
    ldconfig
# Add the archived repo URL and fix RPM imports.
# (`COPY`, not `ADD`: plain local files/directories — hadolint DL3020.)
COPY centos7-repos /tmp/centos7-repos
COPY scripts/fix_rpm /opt/docker/bin/fix_rpm
RUN /opt/docker/bin/fix_rpm
# Add custom `yum_clean_all` script before using `yum`.
COPY scripts/yum_clean_all /opt/docker/bin/

# Install basic requirements, then clean the yum caches in the same layer so
# the metadata never bloats the image.
RUN yum update -y --disablerepo=cuda && \
    yum install -y \
        bzip2 \
        sudo \
        tar \
        which \
        && \
    /opt/docker/bin/yum_clean_all
# Fix locale in UBI 8 images (glibc language packs are not preinstalled there).
# See https://github.com/CentOS/sig-cloud-instance-images/issues/154
RUN if [ "${DISTRO_NAME}${DISTRO_VER}" = "ubi8" ]; then \
        yum install -y \
            glibc-langpack-en \
        && \
        /opt/docker/bin/yum_clean_all; \
    fi
# Remove the preinstalled system compilers
# (presumably so conda-provided toolchains are always used — verify).
RUN rpm -e --nodeps --verbose gcc gcc-c++
63
72
@@ -76,26 +85,14 @@ RUN source /opt/conda/etc/profile.d/conda.sh && \
76
85
chgrp -R lucky /opt/conda && \
77
86
chmod -R g=u /opt/conda
78
87
79
- # Symlink CUDA headers that were moved from $CUDA_HOME/include to /usr/include
80
- # in CUDA 10.1.
81
- RUN for HEADER_FILE in cublas_api.h cublas.h cublasLt.h cublas_v2.h cublasXt.h nvblas.h; do \
82
- if [[ ! -f "${CUDA_HOME}/include/${HEADER_FILE}" ]]; \
83
- then ln -s "/usr/include/${HEADER_FILE}" "${CUDA_HOME}/include/${HEADER_FILE}" ; \
84
- fi; \
85
- done
86
-
87
- # Add qemu in here so that we can use this image on regular linux hosts with qemu user installed
88
- ADD qemu-aarch64-static /usr/bin/qemu-aarch64-static
89
- ADD qemu-ppc64le-static /usr/bin/qemu-ppc64le-static
90
-
# Add a file for users to source to activate the `conda`
# environment `base`. Also add a file that wraps that for
# use with the `ENTRYPOINT`.
COPY linux-anvil-cuda/entrypoint_source /opt/docker/bin/entrypoint_source
COPY scripts/entrypoint /opt/docker/bin/entrypoint
97
94
# Ensure that all containers start with tini and the user selected process.
98
- # Activate the `conda` environment `base` and the devtoolset compiler .
95
+ # Activate the `conda` environment `base`.
99
96
# Provide a default command (`bash`), which will start if the user doesn't specify one.
100
97
ENTRYPOINT [ "/opt/conda/bin/tini" , "--" , "/opt/docker/bin/entrypoint" ]
101
98
CMD [ "/bin/bash" ]
0 commit comments