diff --git a/release-tools/prow.sh b/release-tools/prow.sh
index ff49f81..bc14bf8 100755
--- a/release-tools/prow.sh
+++ b/release-tools/prow.sh
@@ -16,16 +16,10 @@
 # This script runs inside a Prow job. It can run unit tests ("make test")
-# and E2E testing. This E2E testing covers different scenarios (see
-# https://github.com/kubernetes/enhancements/pull/807):
-# - running the stable hostpath example against a Kubernetes release
-# - running the canary hostpath example against a Kubernetes release
-# - building the component in the current repo and running the
-#   stable hostpath example with that one component replaced against
-#   a Kubernetes release
-#
+# and E2E testing. This E2E testing covers different scenarios.
+#
 # The intended usage of this script is that individual repos import
-# csi-release-tools, then link their top-level prow.sh to this or
+# release-tools, then link their top-level prow.sh to this or
 # include it in that file. When including it, several of the variables
 # can be overridden in the top-level prow.sh to customize the script
 # for the repo.
@@ -52,7 +46,7 @@ configvar () {
     eval echo "\$3:" "$1=\${$1}"
 }
 
-# Takes the minor version of $CSI_PROW_KUBERNETES_VERSION and overrides it to
+# Takes the minor version of $COSI_PROW_KUBERNETES_VERSION and overrides it to
 # $1 if they are equal minor versions. Ignores versions that begin with
 # "release-".
 override_k8s_version () {
@@ -61,14 +55,14 @@
 
     # Ignore: See if you can use ${variable//search/replace} instead.
     # shellcheck disable=SC2001
-    current_minor_version="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1\.\2/')"
+    current_minor_version="$(echo "${COSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1\.\2/')"
 
     # Ignore: See if you can use ${variable//search/replace} instead.
     # shellcheck disable=SC2001
     override_minor_version="$(echo "${1}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1\.\2/')"
     if [ "${current_minor_version}" == "${override_minor_version}" ]; then
-        CSI_PROW_KUBERNETES_VERSION="$1"
-        echo "Overriding CSI_PROW_KUBERNETES_VERSION with $1: $CSI_PROW_KUBERNETES_VERSION"
+        COSI_PROW_KUBERNETES_VERSION="$1"
+        echo "Overriding COSI_PROW_KUBERNETES_VERSION with $1: $COSI_PROW_KUBERNETES_VERSION"
     fi
 }
 
@@ -98,28 +92,29 @@ configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags fo
 go_from_travis_yml () {
     grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//'
 }
-configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code
-configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
-configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
-configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
-configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below
+
+configvar COSI_K8S_GO_VERSION "1.15.5" "Go version for building Kubernetes; overrides the auto-detected version when that one cannot be fetched from the Go downloads"
+configvar COSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code
+configvar COSI_PROW_GO_VERSION_E2E "${COSI_K8S_GO_VERSION}" "override Go version for building the Kubernetes E2E test suite" # see install_e2e
+configvar COSI_PROW_GO_VERSION_KIND "${COSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on COSI_PROW_KIND_VERSION below
+configvar COSI_PROW_GO_VERSION_GINKGO "${COSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on COSI_PROW_GINKGO_VERSION below
 
 # kind version to use. If the pre-installed version is different,
 # the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/
 # (if available), otherwise it is built from source.
-configvar CSI_PROW_KIND_VERSION "v0.6.0" "kind"
+configvar COSI_PROW_KIND_VERSION "v0.6.0" "kind"
 
 # ginkgo test runner version to use. If the pre-installed version is
 # different, the desired version is built from source.
-configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
+configvar COSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
 
 # Ginkgo runs the E2E test in parallel. The default is based on the number
 # of CPUs, but typically this can be set to something higher in the job.
-configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginkgo parallelism parameter(s)"
+configvar COSI_PROW_GINKO_PARALLEL "-p" "Ginkgo parallelism parameter(s)"
 
 # Enables building the code in the repository. On by default, can be
 # disabled in jobs which only use pre-built components.
-configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
+configvar COSI_PROW_BUILD_JOB true "building code in repo enabled"
 
 # Kubernetes version to test against. This must be a version number
 # (like 1.13.3) for which there is a pre-built kind image (see
@@ -132,82 +127,30 @@ configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
 # use the same settings as for "latest" Kubernetes. This works
 # as long as there are no breaking changes in Kubernetes, like
 # deprecating or changing the implementation of an alpha feature.
-configvar CSI_PROW_KUBERNETES_VERSION 1.17.0 "Kubernetes"
+configvar COSI_PROW_KUBERNETES_VERSION 1.17.0 "Kubernetes"
 
 # This is a hack to workaround the issue that each version
 # of kind currently only supports specific patch versions of
-# Kubernetes. We need to override CSI_PROW_KUBERNETES_VERSION
+# Kubernetes. We need to override COSI_PROW_KUBERNETES_VERSION
 # passed in by our CI/pull jobs to the versions that
 # kind v0.5.0 supports.
 #
 # If the version is prefixed with "release-", then nothing
 # is overridden.
-override_k8s_version "1.15.3"
+override_k8s_version "1.19.3"
 
-# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
+# COSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
 # with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST
 # instead of latest).
 #
 # This is used to derive the right defaults for the variables below
 # when a Prow job just defines the Kubernetes version.
-csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')"
+cosi_prow_kubernetes_version_suffix="$(echo "${COSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')"
 
 # Work directory. It has to allow running executables, therefore /tmp
 # is avoided. Cleaning up after the script is intentionally left to
 # the caller.
-configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory"
-
-# By default, this script tests sidecars with the CSI hostpath driver,
-# using the install_csi_driver function. That function depends on
-# a deployment script that it searches for in several places:
-#
-# - The "deploy" directory in the current repository: this is useful
-#   for the situation that a component becomes incompatible with the
-#   shared deployment, because then it can (temporarily!) provide its
-#   own example until the shared one can be updated; it's also how
-#   csi-driver-host-path itself provides the example.
-#
-# - CSI_PROW_DRIVER_VERSION of the CSI_PROW_DRIVER_REPO is checked
-#   out: this allows other repos to reference a version of the example
-#   that is known to be compatible.
-#
-# - The /deploy directory can have multiple sub-directories,
-#   each with different deployments (stable set of images for Kubernetes 1.13,
-#   stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.).
-#   This is necessary because there may be incompatible changes in the
-#   "API" of a component (for example, its command line options or RBAC rules)
-#   or in its support for different Kubernetes versions (CSIDriverInfo as
-#   CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14).
-#
-# When testing an update for a component in a PR job, the
-# CSI_PROW_DEPLOYMENT variable can be set in the
-# .prow.sh of each component when there are breaking changes
-# that require using a non-default deployment. The default
-# is a deployment named "kubernetes-x.yy" (if available),
-# otherwise "kubernetes-latest".
-# "none" disables the deployment of the hostpath driver.
-#
-# When no deploy script is found (nothing in `deploy` directory,
-# CSI_PROW_DRIVER_REPO=none), nothing gets deployed.
-#
-# If the deployment script is called with CSI_PROW_TEST_DRIVER=<file> as
-# environment variable, then it must write a suitable test driver configuration
-# into that file in addition to installing the driver.
-configvar CSI_PROW_DRIVER_VERSION "v1.3.0" "CSI driver version"
-configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo"
-configvar CSI_PROW_DEPLOYMENT "" "deployment"
-
-# The install_csi_driver function may work also for other CSI drivers,
-# as long as they follow the conventions of the CSI hostpath driver.
-# If they don't, then a different install function can be provided in
-# a .prow.sh file and this config variable can be overridden.
-configvar CSI_PROW_DRIVER_INSTALL "install_csi_driver" "name of the shell function which installs the CSI driver"
-
-# If CSI_PROW_DRIVER_CANARY is set (typically to "canary", but also
-# version tag. Usually empty. CSI_PROW_HOSTPATH_CANARY is
-# accepted as alternative name because some test-infra jobs
-# still use that name.
-configvar CSI_PROW_DRIVER_CANARY "${CSI_PROW_HOSTPATH_CANARY}" "driver image override for canary images"
+configvar COSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/cosiprow.XXXXXXXXXX")" "work directory"
 
 # The E2E testing can come from an arbitrary repo. The expectation is that
 # the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836)
@@ -215,36 +158,29 @@ configvar CSI_PROW_DRIVER_CANARY "${CSI_PROW_HOSTPATH_CANARY}" "driver image ove
 # then `make WHAT=test/e2e/e2e.test` is called first to ensure that
 # all generated files are present.
 #
-# CSI_PROW_E2E_REPO=none disables E2E testing.
+# COSI_PROW_E2E_REPO=none disables E2E testing.
+# TODO: remove versioned variables and make e2e version match k8s version
-configvar CSI_PROW_E2E_VERSION_1_15 v1.15.0 "E2E version for Kubernetes 1.15.x"
-configvar CSI_PROW_E2E_VERSION_1_16 v1.16.0 "E2E version for Kubernetes 1.16.x"
-configvar CSI_PROW_E2E_VERSION_1_17 v1.17.0 "E2E version for Kubernetes 1.17.x"
-# TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases
-configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version
-configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions
-configvar CSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions
-configvar CSI_PROW_E2E_VERSION "$(get_versioned_variable CSI_PROW_E2E_VERSION "${csi_prow_kubernetes_version_suffix}")" "E2E version"
-configvar CSI_PROW_E2E_REPO "$(get_versioned_variable CSI_PROW_E2E_REPO "${csi_prow_kubernetes_version_suffix}")" "E2E repo"
-configvar CSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable CSI_PROW_E2E_IMPORT_PATH "${csi_prow_kubernetes_version_suffix}")" "E2E package"
-
-# csi-sanity testing from the csi-test repo can be run against the installed
-# CSI driver. For this to work, deploying the driver must expose the Unix domain
-# csi.sock as a TCP service for use by the csi-sanity command, which runs outside
-# of the cluster. The alternative would have been to (cross-)compile csi-sanity
-# and install it inside the cluster, which is not necessarily easier.
-configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo"
-configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master
-configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package"
-configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock"
-configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver"
-configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver"
+configvar COSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version
+configvar COSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions
+configvar COSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions
+configvar COSI_PROW_E2E_VERSION "$(get_versioned_variable COSI_PROW_E2E_VERSION "${cosi_prow_kubernetes_version_suffix}")" "E2E version"
+configvar COSI_PROW_E2E_REPO "$(get_versioned_variable COSI_PROW_E2E_REPO "${cosi_prow_kubernetes_version_suffix}")" "E2E repo"
+configvar COSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable COSI_PROW_E2E_IMPORT_PATH "${cosi_prow_kubernetes_version_suffix}")" "E2E package"
 
 # The version of dep to use for 'make test-vendor'. Ignored if the project doesn't
 # use dep. Only binary releases of dep are supported (https://github.com/golang/dep/releases).
-configvar CSI_PROW_DEP_VERSION v0.5.1 "golang dep version to be used for vendor checking"
+configvar COSI_PROW_DEP_VERSION v0.5.1 "golang dep version to be used for vendor checking"
 
-# Each job can run one or more of the following tests, identified by
+# Version of the spec used
+configvar COSI_SPEC_VERSION master "version of the COSI spec; influences the CRD objects loaded for testing"
+
+# Version of the API used
+configvar COSI_API_VERSION master "version of the COSI API; influences the API objects loaded for testing"
+
+# Version of the controller used
+configvar COSI_CONTROLLER_VERSION master "version of the COSI controller used for testing"
+
+# TODO: Each job can run one or more of the following tests, identified by
 # a single word:
 # - unit testing
 # - parallel excluding alpha features
@@ -255,16 +191,12 @@ configvar CSI_PROW_DEP_VERSION v0.5.1 "golang dep version to be used for vendor
 #
 # Unknown or unsupported entries are ignored.
 #
-# Sanity testing with csi-sanity only covers the CSI driver itself and
-# thus only makes sense in repos which provide their own CSI
-# driver. Repos can enable sanity testing by setting
-# CSI_PROW_TESTS_SANITY=sanity.
-configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha sanity" "tests to run"
+configvar COSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha" "tests to run"
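+# Example (hypothetical job setup): because configvar only provides a default
+# that the environment can override, a repo's top-level .prow.sh can trim the
+# matrix before sourcing this file, e.g.:
+#   COSI_PROW_TESTS="unit parallel" ./release-tools/prow.sh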
 
 tests_enabled () {
     local t1 t2
     # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a.
     # shellcheck disable=SC2206
-    local tests=(${CSI_PROW_TESTS})
+    local tests=(${COSI_PROW_TESTS})
     for t1 in "$@"; do
         for t2 in "${tests[@]}"; do
            if [ "$t1" = "$t2" ]; then
@@ -274,9 +206,7 @@ tests_enabled () {
     done
     return 1
 }
-sanity_enabled () {
-    [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity"
-}
+
 tests_need_kind () {
-    tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" || sanity_enabled
+    tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha"
@@ -289,74 +219,22 @@
 tests_need_alpha_cluster () {
     tests_enabled "parallel-alpha" "serial-alpha"
 }
 
-# Regex for non-alpha, feature-tagged tests that should be run.
-#
-# Starting with 1.17, snapshots is beta, but the E2E tests still have the
-# [Feature:] tag. They need to be explicitly enabled.
-configvar CSI_PROW_E2E_FOCUS_1_15 '^' "non-alpha, feature-tagged tests for Kubernetes = 1.15" # no tests to run, match nothing
-configvar CSI_PROW_E2E_FOCUS_1_16 '^' "non-alpha, feature-tagged tests for Kubernetes = 1.16" # no tests to run, match nothing
-configvar CSI_PROW_E2E_FOCUS_LATEST '\[Feature:VolumeSnapshotDataSource\]' "non-alpha, feature-tagged tests for Kubernetes >= 1.17"
-configvar CSI_PROW_E2E_FOCUS "$(get_versioned_variable CSI_PROW_E2E_FOCUS "${csi_prow_kubernetes_version_suffix}")" "non-alpha, feature-tagged tests"
-
 # Serial vs. parallel is always determined by these regular expressions.
 # Individual regular expressions are seperated by spaces for readability
 # and expected to not contain spaces. Use dots instead. The complete
 # regex for Ginkgo will be created by joining the individual terms.
-configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests"
+configvar COSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests"
 
 regex_join () {
     echo "$@" | sed -e 's/  */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g'
 }
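+# Usage sketch (illustrative, based on the definition above): regex_join turns
+# space-separated regex terms into one Ginkgo focus/skip expression, e.g.
+#   regex_join '\[Serial\]' '\[Disruptive\]'   # -> '\[Serial\]|\[Disruptive\]'
+#   regex_join ''                              # -> 'this-matches-nothing'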
 
-# Which tests are alpha depends on the Kubernetes version. We could
-# use the same E2E test for all Kubernetes version. This would have
-# the advantage that new tests can be applied to older versions
-# without having to backport tests.
-#
-# But the feature tag gets removed from E2E tests when the corresponding
-# feature becomes beta, so we would have to track which tests were
-# alpha in previous Kubernetes releases. This was considered too
-# error prone. Therefore we use E2E tests that match the Kubernetes
-# version that is getting tested.
-configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes >= 1.14" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough
-configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests"
-
-# After the parallel E2E test without alpha features, a test cluster
-# with alpha features is brought up and tests that were previously
-# disabled are run. The alpha gates in each release have to be listed
-# explicitly. If none are set (= variable empty), alpha testing
-# is skipped.
-#
-# Testing against "latest" Kubernetes is problematic because some alpha
-# feature which used to work might stop working or change their behavior
-# such that the current tests no longer pass. If that happens,
-# kubernetes-csi components must be updated, either by disabling
-# the failing test for "latest" or by updating the test and not running
-# it anymore for older releases.
-configvar CSI_PROW_E2E_ALPHA_GATES_1_15 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.15"
-configvar CSI_PROW_E2E_ALPHA_GATES_1_16 'VolumeSnapshotDataSource=true' "alpha feature gates for Kubernetes 1.16"
-# TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and
-# add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST.
-configvar CSI_PROW_E2E_ALPHA_GATES_LATEST '' "alpha feature gates for latest Kubernetes"
-configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"
-
-# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
-configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0' "external-snapshotter version tag"
-
-# Some tests are known to be unusable in a KinD cluster. For example,
-# stopping kubelet with "ssh <node> systemctl stop kubelet" simply
-# doesn't work. Such tests should be written in a way that they verify
-# whether they can run with the current cluster provider, but until
-# they are, we filter them out by name. Like the other test selection
-# variables, this is again a space separated list of regular expressions.
-#
-# "different node" test skips can be removed once
-# https://github.com/kubernetes/kubernetes/pull/82678 has been backported
-# to all the K8s versions we test against
-configvar CSI_PROW_E2E_SKIP 'Disruptive|different\s+node' "tests that need to be skipped"
+configvar COSI_PROW_E2E_SKIP 'Disruptive|different\s+node' "tests that need to be skipped"
+
+configvar COSI_PROW_E2E_ALPHA "$(get_versioned_variable COSI_PROW_E2E_ALPHA "${cosi_prow_kubernetes_version_suffix}")" "alpha tests"
 
 # This is the directory for additional result files. Usually set by Prow, but
 # if not (for example, when invoking manually) it defaults to the work directory.
-configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts"
+configvar ARTIFACTS "${COSI_PROW_WORK}/artifacts" "artifacts"
 mkdir -p "${ARTIFACTS}"
 
 run () {
@@ -378,9 +256,9 @@ die () {
 }
 
 # For additional tools.
-CSI_PROW_BIN="${CSI_PROW_WORK}/bin"
-mkdir -p "${CSI_PROW_BIN}"
-PATH="${CSI_PROW_BIN}:$PATH"
+COSI_PROW_BIN="${COSI_PROW_WORK}/bin"
+mkdir -p "${COSI_PROW_BIN}"
+PATH="${COSI_PROW_BIN}:$PATH"
 
 # Ensure that PATH has the desired version of the Go tools, then run command given as argument.
 # Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by
@@ -393,46 +271,46 @@ run_with_go () {
     if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then
         run "$@"
     else
-        if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then
-            run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed"
-            mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version"
+        if ! [ -d "${COSI_PROW_WORK}/go-$version" ]; then
+            run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${COSI_PROW_WORK}" -zxf - || die "installation of Go $version failed"
+            mv "${COSI_PROW_WORK}/go" "${COSI_PROW_WORK}/go-$version"
         fi
-        PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@"
+        PATH="${COSI_PROW_WORK}/go-$version/bin:$PATH" run "$@"
     fi
 }
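+# Usage sketch: run an arbitrary command under a specific Go version,
+# downloading that Go release first when it is not already installed, e.g.:
+#   run_with_go "${COSI_PROW_GO_VERSION_BUILD}" go version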
 
 # Ensure that we have the desired version of kind.
 install_kind () {
-    if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then
+    if kind --version 2>/dev/null | grep -q " ${COSI_PROW_KIND_VERSION}$"; then
         return
     fi
-    if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then
-        chmod u+x "${CSI_PROW_WORK}/bin/kind"
+    if run curl --fail --location -o "${COSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${COSI_PROW_KIND_VERSION}/kind-linux-amd64"; then
+        chmod u+x "${COSI_PROW_WORK}/bin/kind"
     else
-        git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 &&
-        (cd "${GOPATH}/src/sigs.k8s.io/kind" && make install INSTALL_DIR="${CSI_PROW_WORK}/bin")
+        git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${COSI_PROW_KIND_VERSION}" --depth=1 &&
+        (cd "${GOPATH}/src/sigs.k8s.io/kind" && make install INSTALL_DIR="${COSI_PROW_WORK}/bin")
     fi
 }
 
 # Ensure that we have the desired version of the ginkgo test runner.
 install_ginkgo () {
-    # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not.
-    if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then
+    # COSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not.
+    if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${COSI_PROW_GINKGO_VERSION}" ]; then
         return
     fi
-    git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 &&
+    git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${COSI_PROW_GINKGO_VERSION}" --depth=1 &&
     # We have to get dependencies and hence can't call just "go build".
-    run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" &&
-    mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}"
+    run_with_go "${COSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" &&
+    mv "$GOPATH/bin/ginkgo" "${COSI_PROW_BIN}"
 }
 
 # Ensure that we have the desired version of dep.
 install_dep () {
-    if dep version 2>/dev/null | grep -q "version:.*${CSI_PROW_DEP_VERSION}$"; then
+    if dep version 2>/dev/null | grep -q "version:.*${COSI_PROW_DEP_VERSION}$"; then
         return
     fi
-    run curl --fail --location -o "${CSI_PROW_WORK}/bin/dep" "https://github.com/golang/dep/releases/download/v0.5.4/dep-linux-amd64" &&
-    chmod u+x "${CSI_PROW_WORK}/bin/dep"
+    run curl --fail --location -o "${COSI_PROW_WORK}/bin/dep" "https://github.com/golang/dep/releases/download/v0.5.4/dep-linux-amd64" &&
+    chmod u+x "${COSI_PROW_WORK}/bin/dep"
 }
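+# git_checkout usage sketch (mirroring install_kind above): repo URL, target
+# path, revision, plus extra git-fetch arguments:
+#   git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${COSI_PROW_KIND_VERSION}" --depth=1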
- (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" + (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/cosiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" fi # This is useful for local testing or when switching between different revisions in the same @@ -487,19 +365,6 @@ git_clone_branch () { (cd "$path" && run git clean -fdx) || die "failed to clean $path" } -list_gates () ( - set -f; IFS=',' - # Ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - set -- $1 - while [ "$1" ]; do - # Ignore: See if you can use ${variable//search/replace} instead. - # shellcheck disable=SC2001 - echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/' - shift - done -) - go_version_for_kubernetes () ( local path="$1" local version="$2" @@ -514,35 +379,36 @@ go_version_for_kubernetes () ( echo "$go_version" ) -csi_prow_kind_have_kubernetes=false +cosi_prow_kind_have_kubernetes=false # Brings up a Kubernetes cluster and sets KUBECONFIG. # Accepts additional feature gates in the form gate1=true|false,gate2=... start_cluster () { local image gates gates="$1" - if kind get clusters | grep -q csi-prow; then - run kind delete cluster --name=csi-prow || die "kind delete failed" + if kind get clusters | grep -q cosi-prow; then + run kind delete cluster --name=cosi-prow || die "kind delete failed" fi + echo "build k/k source" # Build from source? - if [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then - if ! ${csi_prow_kind_have_kubernetes}; then - local version="${CSI_PROW_KUBERNETES_VERSION}" + if [[ "${COSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then + if ! ${cosi_prow_kind_have_kubernetes}; then + local version="${COSI_PROW_KUBERNETES_VERSION}" if [ "$version" = "latest" ]; then version=master fi - git_clone_branch https://github.com/kubernetes/kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version" || die "checking out Kubernetes $version failed" + git_clone_branch https://github.com/kubernetes/kubernetes "${COSI_PROW_WORK}/src/kubernetes" "$version" || die "checking out Kubernetes $version failed" - go_version="$(go_version_for_kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" - run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "${CSI_PROW_WORK}/src/kubernetes" || die "'kind build node-image' failed" - csi_prow_kind_have_kubernetes=true + go_version="$(go_version_for_kubernetes "${COSI_PROW_WORK}/src/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" + run_with_go "$go_version" kind build node-image --type bazel --image cosiprow/node:latest --kube-root "${COSI_PROW_WORK}/src/kubernetes" || die "'kind build node-image' failed" + cosi_prow_kind_have_kubernetes=true fi - image="csiprow/node:latest" + image="cosiprow/node:latest" else - image="kindest/node:v${CSI_PROW_KUBERNETES_VERSION}" + image="kindest/node:v${COSI_PROW_KUBERNETES_VERSION}" fi - cat >"${CSI_PROW_WORK}/kind-config.yaml" <"${COSI_PROW_WORK}/kind-config.yaml" <>"${CSI_PROW_WORK}/kind-config.yaml" <>"${COSI_PROW_WORK}/kind-config.yaml" </dev/null; wait) - info "For container output see job artifacts." 
- die "deploying the CSI driver with ${deploy_driver} failed" - fi +kubectl_apply () { + // TODO once this CRD is part of core replace it with 'kubectl apply -f $1 --validate=false' + curl $1 | sed '/annotations/ a \ \ "api-approved.kubernetes.io": "https://github.com/kubernetes-sigs/container-object-storage-interface-api/pull/2"' | kubectl apply -f - --validate=false } -# Installs all nessesary snapshotter CRDs -install_snapshot_crds() { - # Wait until volumesnapshot CRDs are in place. - CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/config/crd" - kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" --validate=false - kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml" --validate=false - kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" --validate=false +# Installs all nessesary CRDs +install_crds() { + # Wait until cosi CRDs are in place. + CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-sigs/container-object-storage-interface-api/${COSI_SPEC_VERSION}/crds" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_bucketclasses.yaml" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_bucketrequests.yaml" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_buckets.yaml" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_bucketaccessclasses.yaml" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_bucketaccessrequests.yaml" + kubectl_apply "${CRD_BASE_DIR}/objectstorage.k8s.io_bucketaccesses.yaml" cnt=0 - until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io \ - && kubectl get volumesnapshots.snapshot.storage.k8s.io \ - && kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do + until kubectl get bucketaccessclasses.objectstorage.k8s.io \ + && kubectl get bucketaccessrequests.objectstorage.k8s.io \ + && kubectl get bucketaccesses.objectstorage.k8s.io \ + && kubectl get bucketclasses.objectstorage.k8s.io \ + && kubectl get bucketrequests.objectstorage.k8s.io \ + && kubectl get buckets.objectstorage.k8s.io; do if [ $cnt -gt 30 ]; then - echo >&2 "ERROR: snapshot CRDs not ready after over 1 min" + echo >&2 "ERROR: cosi CRDs not ready after over 1 min" exit 1 fi - echo "$(date +%H:%M:%S)" "waiting for snapshot CRDs, attempt #$cnt" + echo "$(date +%H:%M:%S)" "waiting for cosi CRDs, attempt #$cnt" cnt=$((cnt + 1)) sleep 2 done } -# Install snapshot controller and associated RBAC, retrying until the pod is running. -install_snapshot_controller() { - kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml" +# Install controller and associated RBAC, retrying until the pod is running. 
+# Install controller and associated RBAC, retrying until the pod is running.
+install_controller() {
+    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-sigs/container-object-storage-interface-controller/${COSI_CONTROLLER_VERSION}/deploy/base/sa.yaml"
+    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-sigs/container-object-storage-interface-controller/${COSI_CONTROLLER_VERSION}/deploy/base/rbac.yaml"
     cnt=0
-    until kubectl get clusterrolebinding snapshot-controller-role; do
+    until kubectl get clusterrolebinding objectstorage-controller; do
         if [ $cnt -gt 30 ]; then
             echo "Cluster role bindings:"
             kubectl describe clusterrolebinding
-            echo >&2 "ERROR: snapshot controller RBAC not ready after over 5 min"
+            echo >&2 "ERROR: controller RBAC not ready after over 5 min"
             exit 1
-        fi
-        echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup complete, attempt #$cnt"
-        cnt=$((cnt + 1))
-        sleep 10
+        fi
+        echo "$(date +%H:%M:%S)" "waiting for cosi RBAC setup complete, attempt #$cnt"
+        cnt=$((cnt + 1))
+        sleep 10
     done
-    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
+    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-sigs/container-object-storage-interface-controller/${COSI_CONTROLLER_VERSION}/deploy/base/deployment.yaml"
     cnt=0
-    expected_running_pods=$(curl https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${CSI_SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | grep replicas | cut -d ':' -f 2-)
-    while [ "$(kubectl get pods -l app=snapshot-controller | grep 'Running' -c)" -lt "$expected_running_pods" ]; do
+    kubectl get pods
+    kubectl get pods -l app=objectstorage-controller
+    expected_running_pods=$(curl "https://raw.githubusercontent.com/kubernetes-sigs/container-object-storage-interface-controller/${COSI_CONTROLLER_VERSION}/deploy/base/deployment.yaml" | grep replicas | cut -d ':' -f 2-)
+    while [ "$(kubectl get pods -l app.kubernetes.io/name=container-object-storage-interface-controller | grep 'Running' -c)" -lt "$expected_running_pods" ]; do
         if [ $cnt -gt 30 ]; then
-            echo "snapshot-controller pod status:"
-            kubectl describe pods -l app=snapshot-controller
-            echo >&2 "ERROR: snapshot controller not ready after over 5 min"
+            echo "objectstorage-controller pod status:"
+            kubectl describe pods -l app.kubernetes.io/name=container-object-storage-interface-controller
+            echo >&2 "ERROR: cosi controller not ready after over 5 min"
             exit 1
         fi
-        echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt"
+        echo "$(date +%H:%M:%S)" "waiting for cosi controller deployment to complete, attempt #$cnt"
         cnt=$((cnt + 1))
         sleep 10
     done
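+    # Note: like the CRD wait above, roughly 30 attempts with 10s sleeps give
+    # the deployment about 5 minutes to reach Running, matching the error text.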
@@ -764,16 +606,6 @@
 $(kubectl version)
 
 Driver installation in default namespace:
 $(kubectl get all)
 
-Images in cluster:
-REPOSITORY TAG REVISION
-$(
-# Here we iterate over all images that are in use and print some information about them.
-# The "revision" label is where our build process puts the version number and revision,
-# which is always unique, in contrast to the tag (think "canary"...).
-docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep -e csi -e hostpath | while read -r repo tag id; do
-    echo "$repo" "$tag" "$(docker exec csi-prow-control-plane docker image inspect --format='{{ index .Config.Labels "revision"}}' "$id")"
-done
-)
 
 =========================================================
 EOF
 
@@ -795,33 +627,22 @@ start_loggers () {
     done
 }
 
-# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test".
+# Makes the E2E test suite binary available as "${COSI_PROW_WORK}/e2e.test".
 install_e2e () {
-    if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then
+    if [ -e "${COSI_PROW_WORK}/e2e.test" ]; then
         return
     fi
 
-    git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 &&
-    if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then
-        go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" &&
-        run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
-        ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}"
+    git_checkout "${COSI_PROW_E2E_REPO}" "${GOPATH}/src/${COSI_PROW_E2E_IMPORT_PATH}" "${COSI_PROW_E2E_VERSION}" --depth=1 &&
+    if [ "${COSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then
+        go_version="${COSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${COSI_PROW_E2E_IMPORT_PATH}" "${COSI_PROW_E2E_VERSION}")}" &&
+        run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${COSI_PROW_E2E_IMPORT_PATH}" &&
+        ln -s "${GOPATH}/src/${COSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${COSI_PROW_WORK}"
     else
-        run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e"
+        run_with_go "${COSI_PROW_GO_VERSION_E2E}" go test -c -o "${COSI_PROW_WORK}/e2e.test" "${COSI_PROW_E2E_IMPORT_PATH}/test/e2e"
     fi
 }
 
-# Makes the csi-sanity test suite binary available as
-# "${CSI_PROW_WORK}/csi-sanity".
-install_sanity () (
-    if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then
-        return
-    fi
-
-    git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed"
-    run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed"
-)
-
 # Captures pod output while running some other command.
 run_with_loggers () (
     loggers=$(start_loggers -f)
@@ -832,7 +653,7 @@
 
 # Invokes the filter-junit.go tool.
 run_filter_junit () {
-    run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@"
+    run_with_go "${COSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@"
 }
 
 # Runs the E2E test suite in a sub-shell.
@@ -853,61 +674,9 @@ run_e2e () (
     }
     trap move_junit EXIT
 
-    cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
-    run_with_loggers ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/test-driver.yaml"
-)
-
-# Run csi-sanity against installed CSI driver.
-run_sanity () (
-    install_sanity || die "installing csi-sanity failed"
-
-    cat >"${CSI_PROW_WORK}/mkdir_in_pod.sh" <<EOF
-    cat >"${CSI_PROW_WORK}/rmdir_in_pod.sh" <<EOF
-        if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" 2>&1 | make_test_to_junit; then
+        if ! run_with_go "${COSI_PROW_GO_VERSION_BUILD}" make -k test "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" 2>&1 | make_test_to_junit; then
run_with_go "${COSI_PROW_GO_VERSION_BUILD}" make -k test "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" 2>&1 | make_test_to_junit; then warn "'make test' failed, proceeding anyway" ret=1 fi fi # Required for E2E testing. - run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" || die "'make container' failed" + run_with_go "${COSI_PROW_GO_VERSION_BUILD}" make container "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" || die "'make container' failed" fi if tests_need_kind; then install_kind || die "installing kind failed" - if ${CSI_PROW_BUILD_JOB}; then + if ${COSI_PROW_BUILD_JOB}; then cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" # Get the image that was just built (if any) from the # top-level Makefile CMDS variable and set the @@ -1056,12 +825,12 @@ main () { # side-load those images into the cluster. for i in $cmds; do e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) - images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" + images="$images ${e}_REGISTRY=quay.io/containerobjectstorage ${e}_TAG=cosiprow" # We must avoid the tag "latest" because that implies # always pulling the image # (https://github.com/kubernetes-sigs/kind/issues/328). - docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" + docker tag "$i:latest" "$i:cosiprow" || die "tagging the locally built container image for $i failed" done if [ -e deploy/kubernetes/rbac.yaml ]; then @@ -1079,104 +848,42 @@ main () { if tests_need_non_alpha_cluster; then start_cluster || die "starting the non-alpha cluster failed" - # Install necessary snapshot CRDs and snapshot controller - # For Kubernetes 1.17+, we will install the CRDs and snapshot controller. - if version_gt "${CSI_PROW_KUBERNETES_VERSION}" "1.16.255" || "${CSI_PROW_KUBERNETES_VERSION}" == "latest"; then - info "Version ${CSI_PROW_KUBERNETES_VERSION}, installing CRDs and snapshot controller" - install_snapshot_crds - install_snapshot_controller + # Install necessary CRDs and controllers + # For Kubernetes 1.19+, we will install the CRDs and controller. + if version_gt "${COSI_PROW_KUBERNETES_VERSION}" "1.16.255" || "${COSI_PROW_KUBERNETES_VERSION}" == "latest"; then + info "Version ${COSI_PROW_KUBERNETES_VERSION}, installing CRDs and cosi controller" + install_crds + install_controller else - info "Version ${CSI_PROW_KUBERNETES_VERSION}, skipping CRDs and snapshot controller" + info "Version ${COSI_PROW_KUBERNETES_VERSION}, skipping CRDs and cosi controller" fi - # Installing the driver might be disabled. - if ${CSI_PROW_DRIVER_INSTALL} "$images"; then - collect_cluster_info - - if sanity_enabled; then - if ! run_sanity; then - ret=1 - fi - fi - - if tests_enabled "parallel"; then - # Ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ - -focus="External.Storage" \ - -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then - warn "E2E parallel failed" - ret=1 - fi - - # Run tests that are feature tagged, but non-alpha - # Ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - if ! run_e2e parallel-features ${CSI_PROW_GINKO_PARALLEL} \ - -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_FOCUS}"))" \ - -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}")"; then - warn "E2E parallel features failed" - ret=1 - fi - fi + collect_cluster_info - if tests_enabled "serial"; then - if ! 
-                    -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \
-                    -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then
-                    warn "E2E serial failed"
-                    ret=1
-                fi
-            fi
-        fi
-        delete_cluster_inside_prow_job
-    fi
-
-    if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then
-        # Need to (re)create the cluster.
-        start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed"
-
-        # Install necessary snapshot CRDs and snapshot controller
-        # For Kubernetes 1.17+, we will install the CRDs and snapshot controller.
-        if version_gt "${CSI_PROW_KUBERNETES_VERSION}" "1.16.255" || "${CSI_PROW_KUBERNETES_VERSION}" == "latest"; then
-            info "Version ${CSI_PROW_KUBERNETES_VERSION}, installing CRDs and snapshot controller"
-            install_snapshot_crds
-            install_snapshot_controller
-        else
-            info "Version ${CSI_PROW_KUBERNETES_VERSION}, skipping CRDs and snapshot controller"
-        fi
-
-        # Installing the driver might be disabled.
-        if ${CSI_PROW_DRIVER_INSTALL} "$images"; then
-            collect_cluster_info
-
-            if tests_enabled "parallel-alpha"; then
-                # Ignore: Double quote to prevent globbing and word splitting.
-                # shellcheck disable=SC2086
-                if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \
-                    -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \
-                    -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then
-                    warn "E2E parallel alpha failed"
-                    ret=1
-                fi
-            fi
-            if tests_enabled "serial-alpha"; then
-                if ! run_e2e serial-alpha \
-                    -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \
-                    -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then
-                    warn "E2E serial alpha failed"
-                    ret=1
-                fi
-            fi
+        if tests_enabled "serial"; then
+            if ! run_e2e serial \
+                -focus="ObjectStorage.*" \
+                -skip="$(regex_join "${COSI_PROW_E2E_ALPHA}" "${COSI_PROW_E2E_SKIP}")"; then
+                warn "E2E serial failed"
+                ret=1
             fi
         fi
-        delete_cluster_inside_prow_job
+        fi
+        delete_cluster_inside_prow_job
     fi
-
     # Merge all junit files into one. This gets rid of duplicated "skipped" tests.
     if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then
-        run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"
+        run_filter_junit -o "${COSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${COSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"
     fi
 
     return "$ret"