
Commit 6cc9e95

MULTIARCH-4989: Remove the kube-rbac-proxy
Images provided under gcr.io/kubebuilder/ will be unavailable from March 18, 2025. Projects initialized with Kubebuilder v3.14 or lower use gcr.io/kubebuilder/kube-rbac-proxy to protect the metrics endpoint. Following the work in kubernetes-sigs/kubebuilder#4003, this commit removes the kube-rbac-proxy container and lets the controller's main container expose the metrics endpoint over HTTPS, protected by the WithAuthenticationAndAuthorization filter. It also includes a minor fix in BuildService that had slipped through while resolving conflicts during a rebase. Related to kubernetes-sigs/kubebuilder#3871
1 parent 6787518 commit 6cc9e95

14 files changed: +153 / -216 lines
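For context on the approach the commit message describes: with kube-rbac-proxy gone, controller-runtime's built-in metrics server serves /metrics over HTTPS and performs authentication and authorization itself. Below is a minimal sketch of that wiring (assuming controller-runtime >= v0.16); the actual flag handling in this repository's main.go is not part of this diff, and the CertDir value is an assumption based on the volume mount added in config/manager/manager.yaml further down.

// Minimal sketch (not this repository's actual main.go): serve metrics over HTTPS
// on :8443 and gate them with TokenReview/SubjectAccessReview checks, replacing
// the removed kube-rbac-proxy sidecar.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		HealthProbeBindAddress: ":8081",
		Metrics: metricsserver.Options{
			BindAddress:    ":8443",
			SecureServing:  true,
			FilterProvider: filters.WithAuthenticationAndAuthorization,
			// Assumption: the serving certificate is mounted where manager.yaml
			// below mounts the service cert secret.
			CertDir: "/var/run/manager/tls",
		},
	})
	if err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}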

bundle/manifests/multiarch-tuning-operator-controller-manager-service_v1_service.yaml

Lines changed: 5 additions & 1 deletion
@@ -5,8 +5,12 @@ metadata:
   name: multiarch-tuning-operator-controller-manager-service
 spec:
   ports:
-  - port: 443
+  - name: webhook
+    port: 443
     targetPort: 9443
+  - name: metrics
+    port: 8443
+    targetPort: 8443
   selector:
     control-plane: controller-manager
 status:

bundle/manifests/multiarch-tuning-operator.clusterserviceversion.yaml

Lines changed: 9 additions & 22 deletions
@@ -27,7 +27,7 @@ metadata:
     categories: OpenShift Optional, Other
     console.openshift.io/disable-operand-delete: "false"
     containerImage: registry.ci.openshift.org/origin/multiarch-tuning-operator:main
-    createdAt: "2024-08-27T22:08:50Z"
+    createdAt: "2024-09-05T20:29:54Z"
     features.operators.openshift.io/cnf: "false"
     features.operators.openshift.io/cni: "false"
     features.operators.openshift.io/csi: "false"
@@ -350,7 +350,7 @@ spec:
       containers:
       - args:
         - --health-probe-bind-address=:8081
-        - --metrics-bind-address=127.0.0.1:8080
+        - --metrics-bind-address=:8443
        - --leader-elect
        - --enable-operator
        command:
@@ -373,6 +373,13 @@ spec:
          initialDelaySeconds: 15
          periodSeconds: 20
        name: manager
+        ports:
+        - containerPort: 8081
+          name: health
+          protocol: TCP
+        - containerPort: 8443
+          name: https
+          protocol: TCP
        readinessProbe:
          httpGet:
            path: /readyz
@@ -395,26 +402,6 @@ spec:
        - mountPath: /etc/ssl/certs/
          name: ca-projected-volume
          readOnly: true
-      - args:
-        - --secure-listen-address=0.0.0.0:8443
-        - --upstream=http://127.0.0.1:8080/
-        - --logtostderr=true
-        - --v=0
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1@sha256:d4883d7c622683b3319b5e6b3a7edfbf2594c18060131a8bf64504805f875522
-        name: kube-rbac-proxy
-        ports:
-        - containerPort: 8443
-          name: https
-          protocol: TCP
-        resources:
-          requests:
-            cpu: 10m
-            memory: 64Mi
-        securityContext:
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
      priorityClassName: system-cluster-critical
      securityContext:
        runAsNonRoot: true

config/default/kustomization.yaml

Lines changed: 0 additions & 8 deletions
@@ -25,14 +25,6 @@ bases:
 # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
 #- ../prometheus
 
-patchesStrategicMerge:
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-- manager_auth_proxy_patch.yaml
-- manager_config_patch.yaml
-
-
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
 #- manager_webhook_patch.yaml

config/default/manager_auth_proxy_patch.yaml

Lines changed: 0 additions & 35 deletions
@@ -8,26 +8,6 @@ metadata:
 spec:
   template:
     spec:
-      securityContext:
-        runAsNonRoot: true
-        seccompProfile:
-          type: RuntimeDefault
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/arch
-                operator: In
-                values:
-                - amd64
-                - arm64
-                - ppc64le
-                - s390x
-              - key: kubernetes.io/os
-                operator: In
-                values:
-                - linux
       containers:
       - name: kube-rbac-proxy
         securityContext:
@@ -49,18 +29,3 @@ spec:
           requests:
             cpu: 10m
             memory: 64Mi
-      - name: manager
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: IMAGE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.annotations['multiarch.openshift.io/image']
-        args:
-        - "--health-probe-bind-address=:8081"
-        - "--metrics-bind-address=127.0.0.1:8080"
-        - "--leader-elect"
-        - "--enable-operator"

config/default/manager_config_patch.yaml

Lines changed: 0 additions & 38 deletions
This file was deleted.

config/manager/manager.yaml

Lines changed: 66 additions & 31 deletions
@@ -41,43 +41,54 @@ spec:
       labels:
         control-plane: controller-manager
     spec:
-      # TODO(user): Uncomment the following code to configure the nodeAffinity expression
-      # according to the platforms which are supported by your solution.
-      # It is considered best practice to support multiple architectures. You can
-      # build your manager image using the makefile target docker-buildx.
-      # affinity:
-      #   nodeAffinity:
-      #     requiredDuringSchedulingIgnoredDuringExecution:
-      #       nodeSelectorTerms:
-      #       - matchExpressions:
-      #         - key: kubernetes.io/arch
-      #           operator: In
-      #           values:
-      #           - amd64
-      #           - arm64
-      #           - ppc64le
-      #           - s390x
-      #         - key: kubernetes.io/os
-      #           operator: In
-      #           values:
-      #           - linux
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/arch
+                operator: In
+                values:
+                - amd64
+                - arm64
+                - ppc64le
+                - s390x
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
       securityContext:
         runAsNonRoot: true
-        # TODO(user): For common cases that do not require escalating privileges
-        # it is recommended to ensure that all your Pods/Containers are restrictive.
-        # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
-        # Please uncomment the following code if your project does NOT have to work on old Kubernetes
-        # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
-        # seccompProfile:
-        #   type: RuntimeDefault
+        seccompProfile:
+          type: RuntimeDefault
       containers:
       - command:
         - /manager
         args:
-        - --leader-elect
+        - "--health-probe-bind-address=:8081"
+        - "--metrics-bind-address=:8443"
+        - "--leader-elect"
+        - "--enable-operator"
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: IMAGE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.annotations['multiarch.openshift.io/image']
         image: controller:latest
-        imagePullPolicy: Always # TODO[aleskandro]: this is for testing reasons.
+        imagePullPolicy: Always
         name: manager
+        ports:
+        - containerPort: 8081
+          name: health
+          protocol: TCP
+        - containerPort: 8443
+          name: https # This should be "metrics", but the automated bundle generation tooling requires the name to be https
+          # for backwards compatibility with the previous version of kubebuilder that used kube-rbac-proxy
+          protocol: TCP
         securityContext:
           allowPrivilegeEscalation: false
           capabilities:
@@ -95,12 +106,36 @@ spec:
             port: 8081
           initialDelaySeconds: 5
          periodSeconds: 10
-        # TODO(user): Configure the resources accordingly based on the project requirements.
-        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        resources:
          requests:
            cpu: 10m
            memory: 64Mi
+        volumeMounts:
+        - mountPath: /var/run/manager/tls
+          name: multiarch-tuning-operator-controller-manager-service-cert
+          readOnly: true
+        - mountPath: /etc/ssl/certs/
+          name: ca-projected-volume
+          readOnly: true
      priorityClassName: system-cluster-critical
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10
+      volumes:
+      - name: multiarch-tuning-operator-controller-manager-service-cert
+        secret:
+          secretName: multiarch-tuning-operator-controller-manager-service-cert
+          defaultMode: 420
+      - name: ca-projected-volume
+        projected:
+          sources:
+          - configMap:
+              name: openshift-service-ca.crt
+              items:
+              - key: service-ca.crt
+                path: openshift-ca.crt
+              optional: true
+          - configMap:
+              name: kube-root-ca.crt
+              items:
+              - key: ca.crt
+                path: kube-root-ca.crt
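Because the metrics endpoint is now authenticated and authorized in-process, a scraper has to present a ServiceAccount token and trust the serving certificate. The following is a rough sketch of such a client; the token path is the standard ServiceAccount projection, while the CA path and Service DNS name are assumptions based on the ca-projected-volume mount above and the bundle Service manifest (a real scraper or the e2e tests may wire this differently).

// Rough sketch of an in-cluster client scraping the secured /metrics endpoint.
// The CA path assumes the scraping pod mounts the OpenShift service CA the same
// way the manager does above; replace <NAMESPACE> with the install namespace.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)

func main() {
	token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		panic(err)
	}
	caPEM, err := os.ReadFile("/etc/ssl/certs/openshift-ca.crt")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}}
	url := "https://multiarch-tuning-operator-controller-manager-service.<NAMESPACE>.svc:8443/metrics"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes of metrics")
}

With WithAuthenticationAndAuthorization, the scraping ServiceAccount typically also needs RBAC permission to GET the /metrics non-resource URL, and the manager's ServiceAccount needs permission to create TokenReviews and SubjectAccessReviews; those manifests are outside this diff.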

config/webhook/service.yaml

Lines changed: 4 additions & 0 deletions
@@ -7,5 +7,9 @@ spec:
   ports:
   - port: 443
     targetPort: 9443
+    name: webhook
+  - port: 8443
+    targetPort: 8443
+    name: metrics
   selector:
     control-plane: controller-manager

controllers/operator/clusterpodplacementconfig_controller.go

Lines changed: 6 additions & 28 deletions
@@ -27,7 +27,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	errorutils "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
 
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -211,14 +210,6 @@ func (r *ClusterPodPlacementConfigReconciler) handleDelete(ctx context.Context,
 			NamespacedTypedClient: r.ClientSet.CoreV1().Services(utils.Namespace()),
 			ObjName: utils.PodPlacementWebhookName,
 		},
-		{
-			NamespacedTypedClient: r.ClientSet.CoreV1().Services(utils.Namespace()),
-			ObjName: utils.PodPlacementControllerMetricsServiceName,
-		},
-		{
-			NamespacedTypedClient: r.ClientSet.CoreV1().Services(utils.Namespace()),
-			ObjName: utils.PodPlacementWebhookMetricsServiceName,
-		},
 		{
 			NamespacedTypedClient: r.ClientSet.AppsV1().Deployments(utils.Namespace()),
 			ObjName: utils.PodPlacementWebhookName,
@@ -306,6 +297,10 @@ func (r *ClusterPodPlacementConfigReconciler) handleDelete(ctx context.Context,
 		}
 	}
 	objsToDelete = []utils.ToDeleteRef{
+		{
+			NamespacedTypedClient: r.ClientSet.CoreV1().Services(utils.Namespace()),
+			ObjName: utils.PodPlacementControllerName,
+		},
 		{
 			NamespacedTypedClient: r.ClientSet.AppsV1().Deployments(utils.Namespace()),
 			ObjName: utils.PodPlacementControllerName,
@@ -354,25 +349,8 @@ func (r *ClusterPodPlacementConfigReconciler) reconcile(ctx context.Context, clu
 	objects := []client.Object{
 		// The finalizer will not affect the reconciliation of ReplicaSets and Pods
 		// when updates to the ClusterPodPlacementConfig are made.
-		buildService(utils.PodPlacementControllerName, utils.PodPlacementControllerName,
-			443, intstr.FromInt32(9443)),
-		buildService(utils.PodPlacementWebhookName, utils.PodPlacementWebhookName,
-			443, intstr.FromInt32(9443)),
-		buildService(
-			utils.PodPlacementControllerMetricsServiceName, utils.PodPlacementControllerName,
-			8443, intstr.FromInt32(8443)),
-		buildService(
-			utils.PodPlacementWebhookMetricsServiceName, utils.PodPlacementWebhookName,
-			8443, intstr.FromInt32(8443)), buildService(utils.PodPlacementControllerName, utils.PodPlacementControllerName,
-			443, intstr.FromInt32(9443)),
-		buildService(utils.PodPlacementWebhookName, utils.PodPlacementWebhookName,
-			443, intstr.FromInt32(9443)),
-		buildService(
-			utils.PodPlacementControllerMetricsServiceName, utils.PodPlacementControllerName,
-			8443, intstr.FromInt32(8443)),
-		buildService(
-			utils.PodPlacementWebhookMetricsServiceName, utils.PodPlacementWebhookName,
-			8443, intstr.FromInt32(8443)),
+		buildService(utils.PodPlacementControllerName),
+		buildService(utils.PodPlacementWebhookName),
 		buildClusterRoleController(), buildClusterRoleWebhook(), buildRoleController(),
 		buildServiceAccount(utils.PodPlacementWebhookName), buildServiceAccount(utils.PodPlacementControllerName),
 		buildClusterRoleBinding(utils.PodPlacementControllerName, rbacv1.RoleRef{
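The reconcile loop now builds a single Service per component through the one-argument buildService. Its implementation is not part of this diff (hence the dropped intstr import here); a hypothetical shape consistent with the Service manifest changes above, where one Service carries both the webhook and the metrics ports, might look like this:

// Hypothetical sketch only; the real buildService is defined elsewhere in the
// operator, outside this diff. It folds the former per-purpose Services into a
// single Service per component, exposing the webhook port (443 -> 9443) and the
// metrics port (8443 -> 8443).
//
// Assumed imports:
//   corev1 "k8s.io/api/core/v1"
//   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//   "k8s.io/apimachinery/pkg/util/intstr"
func buildService(name string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: utils.Namespace(),
		},
		Spec: corev1.ServiceSpec{
			// The selector label key/value are assumptions for illustration.
			Selector: map[string]string{"controller": name},
			Ports: []corev1.ServicePort{
				{Name: "webhook", Port: 443, TargetPort: intstr.FromInt32(9443)},
				{Name: "metrics", Port: 8443, TargetPort: intstr.FromInt32(8443)},
			},
		},
	}
}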
