diff --git a/Makefile b/Makefile
index 6a5c846..76deb15 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,73 @@
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk command is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Build Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+
+## Tool Binaries
+KUSTOMIZE ?= $(LOCALBIN)/kustomize
+CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
+ENVTEST ?= $(LOCALBIN)/setup-envtest
+KCP ?= $(LOCALBIN)/kcp
+KUBECTL_KCP ?= $(LOCALBIN)/kubectl-kcp
+YQ ?= $(LOCALBIN)/yq
+
+## Tool Versions
+KUSTOMIZE_VERSION ?= v3.8.7
+CONTROLLER_TOOLS_VERSION ?= v0.8.0
+KCP_VERSION ?= 0.7.5
+YQ_VERSION ?= v4.27.2
+
+KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
+$(KUSTOMIZE): ## Download kustomize locally if necessary.
+	mkdir -p $(LOCALBIN)
+	curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN)
+	touch $(KUSTOMIZE) # we download an "old" file, so make will re-download to refresh it unless we make it newer than the owning dir
+
+$(CONTROLLER_GEN): ## Download controller-gen locally if necessary.
+	mkdir -p $(LOCALBIN)
+	GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
+
+$(YQ): ## Download yq locally if necessary.
+	mkdir -p $(LOCALBIN)
+	GOBIN=$(LOCALBIN) go install github.com/mikefarah/yq/v4@$(YQ_VERSION)
+
+OS ?= $(shell go env GOOS)
+ARCH ?= $(shell go env GOARCH)
+
+$(KCP): ## Download kcp locally if necessary.
+	mkdir -p $(LOCALBIN)
+	curl -L -s -o - https://github.com/kcp-dev/kcp/releases/download/v$(KCP_VERSION)/kcp_$(KCP_VERSION)_$(OS)_$(ARCH).tar.gz | tar --directory $(LOCALBIN)/../ -xvzf - bin/kcp
+	touch $(KCP) # we download an "old" file, so make will re-download to refresh it unless we make it newer than the owning dir
+
+$(KUBECTL_KCP): ## Download kcp kubectl plugins locally if necessary.
+	mkdir -p $(LOCALBIN)
+	curl -L -s -o - https://github.com/kcp-dev/kcp/releases/download/v$(KCP_VERSION)/kubectl-kcp-plugin_$(KCP_VERSION)_$(OS)_$(ARCH).tar.gz | tar --directory $(LOCALBIN)/../ -xvzf - bin
+	touch $(KUBECTL_KCP) # we download an "old" file, so make will re-download to refresh it unless we make it newer than the owning dir
-# Image URL to use all building/pushing image targets
-IMG ?= controller:latest
+$(ENVTEST): ## Download envtest locally if necessary.
+	mkdir -p $(LOCALBIN)
+	GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
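+
+# A new tool can follow the same file-target pattern; a minimal sketch with a
+# hypothetical tool "foo" (the name, version, and module path below are
+# placeholders, not part of this repository):
+#
+#   FOO ?= $(LOCALBIN)/foo
+#   FOO_VERSION ?= v1.0.0
+#   $(FOO): ## Download foo locally if necessary.
+#   	mkdir -p $(LOCALBIN)
+#   	GOBIN=$(LOCALBIN) go install example.com/foo/cmd/foo@$(FOO_VERSION)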
+
+# Image registry and URL to use for all building/pushing image targets
+REGISTRY ?= localhost
+IMG ?= controller:test
 
 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
 ENVTEST_K8S_VERSION = 1.23
 
@@ -23,35 +90,18 @@ all: build
 # kcp specific
 APIEXPORT_PREFIX ?= today
 
-##@ General
-
-# The help target prints out all targets with their descriptions organized
-# beneath their categories. The categories are represented by '##@' and the
-# target descriptions by '##'. The awk commands is responsible for reading the
-# entire set of makefiles included in this invocation, looking for lines of the
-# file as xyz: ## something, and then pretty-format the target and help. Then,
-# if there's a line with ##@ something, that gets pretty-printed as a category.
-# More info on the usage of ANSI control characters for terminal formatting:
-# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
-# More info on the awk command:
-# http://linuxcommand.org/lc3_adv_awk.php
-
-.PHONY: help
-help: ## Display this help.
-	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
-
 ##@ Development
 
 .PHONY: manifests
-manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
+manifests: $(CONTROLLER_GEN) ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
 	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
 
 .PHONY: apiresourceschemas
-apiresourceschemas: kustomize ## Convert CRDs from config/crds to APIResourceSchemas. Specify APIEXPORT_PREFIX as needed.
+apiresourceschemas: $(KUSTOMIZE) ## Convert CRDs from config/crds to APIResourceSchemas. Specify APIEXPORT_PREFIX as needed.
 	$(KUSTOMIZE) build config/crd | kubectl kcp crd snapshot -f - --prefix $(APIEXPORT_PREFIX) > config/kcp/$(APIEXPORT_PREFIX).apiresourceschemas.yaml
 
 .PHONY: generate
-generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+generate: $(CONTROLLER_GEN) ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
 	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
 
 .PHONY: fmt
@@ -63,8 +113,76 @@ vet: ## Run go vet against code.
 	go vet ./...
 
 .PHONY: test
-test: manifests generate fmt vet envtest ## Run tests.
-	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
+test: manifests generate fmt vet $(ENVTEST) ## Run tests.
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./controllers/... -coverprofile cover.out
+
+ARTIFACT_DIR ?= .test
+
+.PHONY: test-e2e
+test-e2e: $(ARTIFACT_DIR)/kind.kubeconfig kcp-synctarget ready-deployment run-test-e2e ## Set up prerequisites and run end-to-end tests on a cluster.
+
+.PHONY: run-test-e2e
+run-test-e2e: ## Run end-to-end tests on a cluster.
+	go test ./test/e2e/... --kubeconfig $(abspath $(ARTIFACT_DIR)/kcp.kubeconfig) --workspace $(shell $(KCP_KUBECTL) kcp workspace . --short)
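+
+# For example, a complete local run that keeps its artifacts in a custom
+# directory (the path is only an illustration):
+#
+#   make test-e2e ARTIFACT_DIR=/tmp/controller-runtime-e2e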
+
+.PHONY: ready-deployment
+ready-deployment: KUBECONFIG = $(ARTIFACT_DIR)/kcp.kubeconfig
+ready-deployment: kind-image install deploy apibinding ## Deploy the controller-manager and wait for it to be ready.
+	$(KCP_KUBECTL) --namespace "controller-runtime-example-system" rollout status deployment/controller-runtime-example-controller-manager
+
+# TODO(skuznets|ncdc): this APIBinding is not needed, but here only to work around https://github.com/kcp-dev/kcp/issues/1183 - remove it once that is fixed
+.PHONY: apibinding
+apibinding:
+	$(eval WORKSPACE = $(shell $(KCP_KUBECTL) kcp workspace . --short))
+	sed 's/WORKSPACE/$(WORKSPACE)/' ./test/e2e/apibinding.yaml | $(KCP_KUBECTL) apply -f -
+	$(KCP_KUBECTL) wait --for=condition=Ready apibinding/data.my.domain
+
+.PHONY: kind-image
+kind-image: docker-build ## Load the controller-manager image into the kind cluster.
+	kind load docker-image $(REGISTRY)/$(IMG) --name controller-runtime-example
+
+$(ARTIFACT_DIR)/kind.kubeconfig: $(ARTIFACT_DIR) ## Run a kind cluster and generate a $KUBECONFIG for it.
+	@if ! kind get clusters --quiet | grep --quiet controller-runtime-example; then kind create cluster --name controller-runtime-example; fi
+	kind get kubeconfig --name controller-runtime-example > $(ARTIFACT_DIR)/kind.kubeconfig
+
+$(ARTIFACT_DIR): ## Create a directory for test artifacts.
+	mkdir -p $(ARTIFACT_DIR)
+
+KCP_KUBECTL ?= PATH=$(LOCALBIN):$(PATH) KUBECONFIG=$(ARTIFACT_DIR)/kcp.kubeconfig kubectl
+KIND_KUBECTL ?= kubectl --kubeconfig $(ARTIFACT_DIR)/kind.kubeconfig
+
+.PHONY: kcp-synctarget
+kcp-synctarget: kcp-workspace $(ARTIFACT_DIR)/syncer.yaml $(YQ) ## Add the kind cluster to kcp as a target for workloads.
+	$(KIND_KUBECTL) apply -f $(ARTIFACT_DIR)/syncer.yaml
+	$(eval DEPLOYMENT_NAME = $(shell $(YQ) 'select(.kind=="Deployment") | .metadata.name' < $(ARTIFACT_DIR)/syncer.yaml ))
+	$(eval DEPLOYMENT_NAMESPACE = $(shell $(YQ) 'select(.kind=="Deployment") | .metadata.namespace' < $(ARTIFACT_DIR)/syncer.yaml ))
+	$(KIND_KUBECTL) --namespace $(DEPLOYMENT_NAMESPACE) rollout status deployment/$(DEPLOYMENT_NAME)
+	@if [[ ! -s $(ARTIFACT_DIR)/syncer.log ]]; then ( $(KIND_KUBECTL) --namespace $(DEPLOYMENT_NAMESPACE) logs deployment/$(DEPLOYMENT_NAME) -f >$(ARTIFACT_DIR)/syncer.log 2>&1 & ); fi
+	$(KCP_KUBECTL) wait --for=condition=Ready synctarget/controller-runtime
+
+$(ARTIFACT_DIR)/syncer.yaml: ## Generate the manifests necessary to register the kind cluster with kcp.
+	$(KCP_KUBECTL) kcp workload sync controller-runtime --resources services --syncer-image ghcr.io/kcp-dev/kcp/syncer:v$(KCP_VERSION) --output-file $(ARTIFACT_DIR)/syncer.yaml
+
+.PHONY: kcp-workspace
+kcp-workspace: $(KUBECTL_KCP) kcp-server ## Create a workspace in kcp for the controller-manager.
+	$(KCP_KUBECTL) kcp workspace use '~'
+	@if ! $(KCP_KUBECTL) kcp workspace use controller-runtime-example; then $(KCP_KUBECTL) kcp workspace create controller-runtime-example --type universal --enter; fi
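+
+# Note: the `[[ ! -s <logfile> ]]` guards in kcp-synctarget and kcp-server only
+# start a background process (and its log capture) while its log file is still
+# empty, so re-running these targets does not spawn duplicate processes.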
+.PHONY: kcp-server
+kcp-server: $(KCP) $(ARTIFACT_DIR)/kcp ## Run the kcp server.
+	@if [[ ! -s $(ARTIFACT_DIR)/kcp.log ]]; then ( $(KCP) start -v 5 --root-directory $(ARTIFACT_DIR)/kcp --kubeconfig-path $(ARTIFACT_DIR)/kcp.kubeconfig --audit-log-maxsize 1024 --audit-log-mode=batch --audit-log-batch-max-wait=1s --audit-log-batch-max-size=1000 --audit-log-batch-buffer-size=10000 --audit-log-batch-throttle-burst=15 --audit-log-batch-throttle-enable=true --audit-log-batch-throttle-qps=10 --audit-policy-file ./test/e2e/audit-policy.yaml --audit-log-path $(ARTIFACT_DIR)/audit.log >$(ARTIFACT_DIR)/kcp.log 2>&1 & ); fi
+	@while true; do if [[ ! -s $(ARTIFACT_DIR)/kcp.kubeconfig ]]; then sleep 0.2; else break; fi; done
+	@while true; do if ! kubectl --kubeconfig $(ARTIFACT_DIR)/kcp.kubeconfig get --raw /readyz >$(ARTIFACT_DIR)/kcp.probe.log 2>&1; then sleep 0.2; else break; fi; done
+
+$(ARTIFACT_DIR)/kcp: ## Create a directory for the kcp server data.
+	mkdir -p $(ARTIFACT_DIR)/kcp
+
+.PHONY: test-e2e-cleanup
+test-e2e-cleanup: ## Clean up processes and directories from an end-to-end test run.
+	kind delete cluster --name controller-runtime-example || true
+	rm -rf $(ARTIFACT_DIR) || true
+	pkill -sigterm kcp || true
+	pkill -sigterm kubectl || true
 
 ##@ Build
 
@@ -81,11 +199,11 @@ run: manifests generate fmt vet ## Run a controller from your host.
 
 .PHONY: docker-build
 docker-build: build ## Build docker image with the manager.
-	docker build -t ${IMG} .
+	docker build -t ${REGISTRY}/${IMG} .
 
 .PHONY: docker-push
 docker-push: ## Push docker image with the manager.
-	docker push ${IMG}
+	docker push ${REGISTRY}/${IMG}
 
 ##@ Deployment
 
@@ -93,56 +211,26 @@ ifndef ignore-not-found
   ignore-not-found = false
 endif
 
+KUBECONFIG ?= $(abspath ~/.kube/config)
+
 .PHONY: install
-install: manifests kustomize ## Install APIResourceSchemas and APIExport into kcp (using $KUBECONFIG or ~/.kube/config).
-	$(KUSTOMIZE) build config/kcp | kubectl apply -f -
+install: manifests $(KUSTOMIZE) ## Install APIResourceSchemas and APIExport into kcp (using $KUBECONFIG or ~/.kube/config).
+	$(KUSTOMIZE) build config/kcp | kubectl --kubeconfig $(KUBECONFIG) apply -f -
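+
+# For example, to target the e2e kcp instance instead of the default
+# kubeconfig (.test/kcp.kubeconfig is where the e2e targets put it):
+#
+#   make install KUBECONFIG=.test/kcp.kubeconfig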
 
 .PHONY: uninstall
-uninstall: manifests kustomize ## Uninstall APIResourceSchemas and APIExport from kcp (using $KUBECONFIG or ~/.kube/config). Call with ignore-not-found=true to ignore resource not found errors during deletion.
-	$(KUSTOMIZE) build config/kcp | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+uninstall: manifests $(KUSTOMIZE) ## Uninstall APIResourceSchemas and APIExport from kcp (using $KUBECONFIG or ~/.kube/config). Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/kcp | kubectl --kubeconfig $(KUBECONFIG) delete --ignore-not-found=$(ignore-not-found) -f -
 
 .PHONY: deploy-crd
-deploy-crd: manifests kustomize ## Deploy controller
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/default-crd | kubectl apply -f - || true
+deploy-crd: manifests $(KUSTOMIZE) ## Deploy controller
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${REGISTRY}/${IMG}
+	$(KUSTOMIZE) build config/default-crd | kubectl --kubeconfig $(KUBECONFIG) apply -f - || true
 
 .PHONY: deploy
-deploy: manifests kustomize ## Deploy controller
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/default | kubectl apply -f -
+deploy: manifests $(KUSTOMIZE) ## Deploy controller
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${REGISTRY}/${IMG}
+	$(KUSTOMIZE) build config/default | kubectl --kubeconfig $(KUBECONFIG) apply -f -
 
 .PHONY: undeploy
 undeploy: ## Undeploy controller. Call with ignore-not-found=true to ignore resource not found errors during deletion.
-	$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
-
-##@ Build Dependencies
-
-## Location to install dependencies to
-LOCALBIN ?= $(shell pwd)/bin
-$(LOCALBIN):
-	mkdir -p $(LOCALBIN)
-
-## Tool Binaries
-KUSTOMIZE ?= $(LOCALBIN)/kustomize
-CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
-ENVTEST ?= $(LOCALBIN)/setup-envtest
-
-## Tool Versions
-KUSTOMIZE_VERSION ?= v3.8.7
-CONTROLLER_TOOLS_VERSION ?= v0.8.0
-
-KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
-.PHONY: kustomize
-kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
-$(KUSTOMIZE): $(LOCALBIN)
-	curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN)
-
-.PHONY: controller-gen
-controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
-$(CONTROLLER_GEN): $(LOCALBIN)
-	GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
-
-.PHONY: envtest
-envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
-$(ENVTEST): $(LOCALBIN)
-	GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+	$(KUSTOMIZE) build config/default | kubectl --kubeconfig $(KUBECONFIG) delete --ignore-not-found=$(ignore-not-found) -f -
\ No newline at end of file
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 5e793dd..f68b412 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -12,5 +12,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 images:
 - name: controller
-  newName: controller
-  newTag: latest
+  newName: localhost/controller
+  newTag: test
diff --git a/test/e2e/apibinding.yaml b/test/e2e/apibinding.yaml
new file mode 100644
index 0000000..d2ba7fa
--- /dev/null
+++ b/test/e2e/apibinding.yaml
@@ -0,0 +1,13 @@
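+# WORKSPACE below is a placeholder: the `apibinding` Makefile target replaces
+# it (via sed) with the current workspace path before applying this file.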
+apiVersion: apis.kcp.dev/v1alpha1
+kind: APIBinding
+metadata:
+  name: data.my.domain
+spec:
+  reference:
+    workspace:
+      path: WORKSPACE
+      exportName: data.my.domain
+  acceptedPermissionClaims:
+  - resource: "secrets"
+  - resource: "configmaps"
+  - resource: "namespaces"
\ No newline at end of file
diff --git a/test/e2e/audit-policy.yaml b/test/e2e/audit-policy.yaml
new file mode 100644
index 0000000..9b1b038
--- /dev/null
+++ b/test/e2e/audit-policy.yaml
@@ -0,0 +1,30 @@
+apiVersion: audit.k8s.io/v1
+kind: Policy
+omitStages:
+  - RequestReceived
+omitManagedFields: true
+rules:
+  - level: None
+    nonResourceURLs:
+      - "/api*"
+      - "/version"
+
+  - level: Metadata
+    resources:
+      - group: ""
+        resources: ["secrets", "configmaps"]
+      - group: "authorization.k8s.io"
+        resources: ["subjectaccessreviews"]
+
+  - level: Metadata
+    verbs: ["list", "watch"]
+
+  - level: Metadata
+    verbs: ["get", "delete"]
+    omitStages:
+      - ResponseStarted
+
+  - level: RequestResponse
+    verbs: ["create", "update", "patch"]
+    omitStages:
+      - ResponseStarted
diff --git a/test/e2e/controller_test.go b/test/e2e/controller_test.go
new file mode 100644
index 0000000..659b0a5
--- /dev/null
+++ b/test/e2e/controller_test.go
@@ -0,0 +1,346 @@
+package e2e
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+
+	kcpclienthelper "github.com/kcp-dev/apimachinery/pkg/client"
+
+	apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1"
+	tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1"
+	"github.com/kcp-dev/kcp/pkg/apis/third_party/conditions/util/conditions"
+
+	"github.com/kcp-dev/logicalcluster/v2"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+
+	datav1alpha1 "github.com/kcp-dev/controller-runtime-example/api/v1alpha1"
+)
+
+// The tests in this package expect to be called when:
+// - kcp is running
+// - a kind cluster is up and running
+// - it is hosting a syncer, and the SyncTarget is ready to go
+// - the controller-manager from this repo is deployed to kcp
+// - that deployment is synced to the kind cluster
+// - the deployment is rolled out & ready
+//
+// We can then check that the controllers defined here are working as expected.
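+//
+// All of the above is orchestrated by `make test-e2e`. The --kubeconfig flag
+// handed to `go test` is kcp's admin kubeconfig; loadClusterConfig below
+// selects its "base" context and scopes each client to one logical cluster.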
+
+var workspaceName string
+
+func init() {
+	rand.Seed(time.Now().Unix())
+	flag.StringVar(&workspaceName, "workspace", "", "Workspace in which to run these tests.")
+}
+
+func parentWorkspace(t *testing.T) logicalcluster.Name {
+	flag.Parse()
+	if workspaceName == "" {
+		t.Fatal("--workspace cannot be empty")
+	}
+
+	return logicalcluster.New(workspaceName)
+}
+
+func loadClusterConfig(t *testing.T, clusterName logicalcluster.Name) *rest.Config {
+	t.Helper()
+	restConfig, err := config.GetConfigWithContext("base")
+	if err != nil {
+		t.Fatalf("failed to load *rest.Config: %v", err)
+	}
+	return rest.AddUserAgent(kcpclienthelper.ConfigWithCluster(restConfig, clusterName), t.Name())
+}
+
+func loadClient(t *testing.T, clusterName logicalcluster.Name) client.Client {
+	t.Helper()
+	scheme := runtime.NewScheme()
+	if err := clientgoscheme.AddToScheme(scheme); err != nil {
+		t.Fatalf("failed to add client go to scheme: %v", err)
+	}
+	if err := tenancyv1alpha1.AddToScheme(scheme); err != nil {
+		t.Fatalf("failed to add %s to scheme: %v", tenancyv1alpha1.SchemeGroupVersion, err)
+	}
+	if err := datav1alpha1.AddToScheme(scheme); err != nil {
+		t.Fatalf("failed to add %s to scheme: %v", datav1alpha1.GroupVersion, err)
+	}
+	if err := apisv1alpha1.AddToScheme(scheme); err != nil {
+		t.Fatalf("failed to add %s to scheme: %v", apisv1alpha1.SchemeGroupVersion, err)
+	}
+	tenancyClient, err := client.New(loadClusterConfig(t, clusterName), client.Options{Scheme: scheme})
+	if err != nil {
+		t.Fatalf("failed to create a client: %v", err)
+	}
+	return tenancyClient
+}
+
+func createWorkspace(t *testing.T, clusterName logicalcluster.Name) client.Client {
+	t.Helper()
+	parent, ok := clusterName.Parent()
+	if !ok {
+		t.Fatalf("cluster %s has no parent", clusterName)
+	}
+	c := loadClient(t, parent)
+	t.Logf("creating workspace %s", clusterName)
+	if err := c.Create(context.TODO(), &tenancyv1alpha1.ClusterWorkspace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterName.Base(),
+		},
+		Spec: tenancyv1alpha1.ClusterWorkspaceSpec{
+			Type: tenancyv1alpha1.ClusterWorkspaceTypeReference{
+				Name: "universal",
+				Path: "root",
+			},
+		},
+	}); err != nil {
+		t.Fatalf("failed to create workspace: %s: %v", clusterName, err)
+	}
+
+	t.Logf("waiting for workspace %s to be ready", clusterName)
+	var workspace tenancyv1alpha1.ClusterWorkspace
+	if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+		fetchErr := c.Get(context.TODO(), client.ObjectKey{Name: clusterName.Base()}, &workspace)
+		if fetchErr != nil {
+			t.Logf("failed to get workspace %s: %v", clusterName, fetchErr)
+			return false, fetchErr
+		}
+		var reason string
+		if actual, expected := workspace.Status.Phase, tenancyv1alpha1.ClusterWorkspacePhaseReady; actual != expected {
+			reason = fmt.Sprintf("phase is %s, not %s", actual, expected)
+			t.Logf("not done waiting for workspace %s to be ready: %s", clusterName, reason)
+		}
+		return reason == "", nil
+	}); err != nil {
+		t.Fatalf("workspace %s never ready: %v", clusterName, err)
+	}
+
+	return createAPIBinding(t, clusterName)
+}
+
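+// createAPIBinding binds the APIExport published in the parent workspace into
+// the newly created child workspace, so that the Widget API and the claimed
+// core resources (configmaps, secrets, namespaces) are available to the tests.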
+func createAPIBinding(t *testing.T, workspaceCluster logicalcluster.Name) client.Client {
+	c := loadClient(t, workspaceCluster)
+	apiName := "controller-runtime-example-data.my.domain"
+	t.Logf("creating APIBinding %s|%s", workspaceCluster, apiName)
+	if err := c.Create(context.TODO(), &apisv1alpha1.APIBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: apiName,
+		},
+		Spec: apisv1alpha1.APIBindingSpec{
+			Reference: apisv1alpha1.ExportReference{
+				Workspace: &apisv1alpha1.WorkspaceExportReference{
+					Path:       parentWorkspace(t).String(),
+					ExportName: apiName,
+				},
+			},
+			AcceptedPermissionClaims: []apisv1alpha1.PermissionClaim{
+				{GroupResource: apisv1alpha1.GroupResource{Resource: "configmaps"}},
+				{GroupResource: apisv1alpha1.GroupResource{Resource: "secrets"}},
+				{GroupResource: apisv1alpha1.GroupResource{Resource: "namespaces"}},
+			},
+		},
+	}); err != nil {
+		t.Fatalf("could not create APIBinding %s|%s: %v", workspaceCluster, apiName, err)
+	}
+
+	t.Logf("waiting for APIBinding %s|%s to be bound", workspaceCluster, apiName)
+	var apiBinding apisv1alpha1.APIBinding
+	if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+		fetchErr := c.Get(context.TODO(), client.ObjectKey{Name: apiName}, &apiBinding)
+		if fetchErr != nil {
+			t.Logf("failed to get APIBinding %s|%s: %v", workspaceCluster, apiName, fetchErr)
+			return false, fetchErr
+		}
+		var reason string
+		if !conditions.IsTrue(&apiBinding, apisv1alpha1.InitialBindingCompleted) {
+			condition := conditions.Get(&apiBinding, apisv1alpha1.InitialBindingCompleted)
+			if condition != nil {
+				reason = fmt.Sprintf("%s: %s", condition.Reason, condition.Message)
+			} else {
+				reason = "no condition present"
+			}
+			t.Logf("not done waiting for APIBinding %s|%s to be bound: %s", workspaceCluster, apiName, reason)
+		}
+		return conditions.IsTrue(&apiBinding, apisv1alpha1.InitialBindingCompleted), nil
+	}); err != nil {
+		t.Fatalf("APIBinding %s|%s never bound: %v", workspaceCluster, apiName, err)
+	}
+
+	return c
+}
+
+const characters = "abcdefghijklmnopqrstuvwxyz"
+
+func randomName() string {
+	b := make([]byte, 10)
+	for i := range b {
+		b[i] = characters[rand.Intn(len(characters))]
+	}
+	return string(b)
+}
+
+// TestConfigMapController verifies that our ConfigMap behavior works.
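+// As exercised below, the controller is expected to react to a ConfigMap
+// labelled name=timothy by echoing a `response: hello-timothy` label, creating
+// the namespace named in `.data.namespace`, and copying `.data.secretData`
+// into a Secret of the same name under the key `dataFromCM`.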
+func TestConfigMapController(t *testing.T) {
+	t.Parallel()
+	for i := 0; i < 3; i++ {
+		t.Run(fmt.Sprintf("attempt-%d", i), func(t *testing.T) {
+			t.Parallel()
+			workspaceCluster := parentWorkspace(t).Join(randomName())
+			c := createWorkspace(t, workspaceCluster)
+
+			namespaceName := randomName()
+			t.Logf("creating namespace %s|%s", workspaceCluster, namespaceName)
+			if err := c.Create(context.TODO(), &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{Name: namespaceName},
+			}); err != nil {
+				t.Fatalf("failed to create a namespace: %v", err)
+			}
+
+			otherNamespaceName := randomName()
+			data := randomName()
+			configmapName := randomName()
+			t.Logf("creating configmap %s|%s/%s", workspaceCluster, namespaceName, configmapName)
+			if err := c.Create(context.TODO(), &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      configmapName,
+					Namespace: namespaceName,
+					Labels: map[string]string{
+						"name": "timothy",
+					},
+				},
+				Data: map[string]string{
+					"namespace":  otherNamespaceName,
+					"secretData": data,
+				},
+			}); err != nil {
+				t.Fatalf("failed to create a configmap: %v", err)
+			}
+
+			t.Logf("waiting for configmap %s|%s to have a response", workspaceCluster, configmapName)
+			var configmap corev1.ConfigMap
+			if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+				fetchErr := c.Get(context.TODO(), client.ObjectKey{Namespace: namespaceName, Name: configmapName}, &configmap)
+				if fetchErr != nil {
+					t.Logf("failed to get configmap %s|%s/%s: %v", workspaceCluster, namespaceName, configmapName, fetchErr)
+					return false, fetchErr
+				}
+				response, ok := configmap.Labels["response"]
+				if !ok {
+					t.Logf("configmap %s|%s/%s has no response set", workspaceCluster, namespaceName, configmapName)
+				}
+				diff := cmp.Diff(response, "hello-timothy")
+				if ok && diff != "" {
+					t.Logf("configmap %s|%s/%s has an invalid response: %v", workspaceCluster, namespaceName, configmapName, diff)
+				}
+				return diff == "", nil
+			}); err != nil {
+				t.Fatalf("configmap %s|%s/%s never got a response: %v", workspaceCluster, namespaceName, configmapName, err)
+			}
+
+			t.Logf("waiting for namespace %s|%s to exist", workspaceCluster, otherNamespaceName)
+			var otherNamespace corev1.Namespace
+			if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+				fetchErr := c.Get(context.TODO(), client.ObjectKey{Name: otherNamespaceName}, &otherNamespace)
+				if fetchErr != nil && !apierrors.IsNotFound(fetchErr) {
+					t.Logf("failed to get namespace %s|%s: %v", workspaceCluster, otherNamespaceName, fetchErr)
+					return false, fetchErr
+				}
+				return fetchErr == nil, nil
+			}); err != nil {
+				t.Fatalf("namespace %s|%s never created: %v", workspaceCluster, otherNamespaceName, err)
+			}
+
+			t.Logf("waiting for secret %s|%s/%s to exist and have correct data", workspaceCluster, namespaceName, configmapName)
+			var secret corev1.Secret
+			if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+				fetchErr := c.Get(context.TODO(), client.ObjectKey{Namespace: namespaceName, Name: configmapName}, &secret)
+				if fetchErr != nil && !apierrors.IsNotFound(fetchErr) {
+					t.Logf("failed to get secret %s|%s/%s: %v", workspaceCluster, namespaceName, configmapName, fetchErr)
+					return false, fetchErr
+				}
+				response, ok := secret.Data["dataFromCM"]
+				if !ok {
+					t.Logf("secret %s|%s/%s has no data set", workspaceCluster, namespaceName, configmapName)
+				}
+				diff := cmp.Diff(string(response), data)
+				if ok && diff != "" {
+					t.Logf("secret %s|%s/%s has invalid data: %v", workspaceCluster, namespaceName, configmapName, diff)
+				}
+				return diff == "", nil
+			}); err != nil {
+				t.Fatalf("secret %s|%s/%s never created: %v", workspaceCluster, namespaceName, configmapName, err)
+			}
+		})
+	}
+}
+
+// TestWidgetController verifies that our Widget behavior works.
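+// As asserted below, the controller is expected to reconcile `.status.total`
+// on every Widget to the total number of Widgets across the logical cluster.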
t.Logf("secret %s|%s/%s has invalid data: %v", workspaceCluster, namespaceName, configmapName, diff) + } + return diff == "", nil + }); err != nil { + t.Fatalf("secret %s|%s/%s never created: %v", workspaceCluster, namespaceName, configmapName, err) + } + }) + } +} + +// TestWidgetController verifies that our ConfigMap behavior works. +func TestWidgetController(t *testing.T) { + t.Parallel() + for i := 0; i < 3; i++ { + t.Run(fmt.Sprintf("attempt-%d", i), func(t *testing.T) { + t.Parallel() + workspaceCluster := parentWorkspace(t).Join(randomName()) + c := createWorkspace(t, workspaceCluster) + + var totalWidgets int + for i := 0; i < 3; i++ { + namespaceName := randomName() + t.Logf("creating namespace %s|%s", workspaceCluster, namespaceName) + if err := c.Create(context.TODO(), &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespaceName}, + }); err != nil { + t.Fatalf("failed to create a namespace: %v", err) + } + numWidgets := rand.Intn(10) + for i := 0; i < numWidgets; i++ { + if err := c.Create(context.TODO(), &datav1alpha1.Widget{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespaceName, Name: fmt.Sprintf("widget-%d", i)}, + Spec: datav1alpha1.WidgetSpec{Foo: fmt.Sprintf("intended-%d", i)}, + }); err != nil { + t.Fatalf("failed to create widget: %v", err) + } + } + totalWidgets += numWidgets + } + + t.Logf("waiting for all widgets in cluster %s to have a correct status", workspaceCluster) + var allWidgets datav1alpha1.WidgetList + if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) { + fetchErr := c.List(context.TODO(), &allWidgets) + if fetchErr != nil { + t.Logf("failed to get widgets in cluster %s: %v", workspaceCluster, err) + return false, fetchErr + } + var errs []error + for _, widget := range allWidgets.Items { + if actual, expected := widget.Status.Total, totalWidgets; actual != expected { + errs = append(errs, fmt.Errorf("widget %s|%s .status.total incorrect: %d != %d", workspaceCluster, widget.Name, actual, expected)) + } + } + validationErr := errors.NewAggregate(errs) + if validationErr != nil { + t.Logf("widgets in cluster %s invalid: %v", workspaceCluster, validationErr) + } + return validationErr == nil, nil + }); err != nil { + t.Fatalf("widgets in cluster %s never got correct statuses: %v", workspaceCluster, err) + } + }) + } +}