diff --git a/cmd/machineset/main.go b/cmd/machineset/main.go index 91d3d57546..103a048ff4 100644 --- a/cmd/machineset/main.go +++ b/cmd/machineset/main.go @@ -80,21 +80,37 @@ func main() { } // Enable defaulting and validating webhooks - defaulter, err := v1beta1.NewMachineDefaulter() + machineDefaulter, err := v1beta1.NewMachineDefaulter() if err != nil { log.Fatal(err) } - validator, err := v1beta1.NewMachineValidator() + machineValidator, err := v1beta1.NewMachineValidator() if err != nil { log.Fatal(err) } + machineSetDefaulter, err := v1beta1.NewMachineSetDefaulter() + if err != nil { + log.Fatal(err) + } + + machineSetValidator, err := v1beta1.NewMachineSetValidator() + if err != nil { + log.Fatal(err) + } + + machineSetCPValidator := v1beta1.NewMachineSetCPValidator() + if *webhookEnabled { mgr.GetWebhookServer().Port = *webhookPort mgr.GetWebhookServer().CertDir = *webhookCertdir - mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: defaulter}) - mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: validator}) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineValidator}) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetValidator}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: machineSetCPValidator}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: machineSetCPValidator}) } 
log.Printf("Registering Components.") diff --git a/go.mod b/go.mod index 453c8ac0f7..ac63208049 100644 --- a/go.mod +++ b/go.mod @@ -39,3 +39,5 @@ replace sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api replace sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200529030741-17d4edc5142f replace sigs.k8s.io/cluster-api-provider-gcp => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200528175251-4f2fdeb49fe1 + +replace sigs.k8s.io/controller-runtime => github.com/mgugino-upstream-stage/controller-runtime v0.6.1-0.20200618201807-9d82bf2a7266 diff --git a/go.sum b/go.sum index 974a3f1de7..4aed2dbd3a 100644 --- a/go.sum +++ b/go.sum @@ -300,6 +300,8 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgugino-upstream-stage/controller-runtime v0.6.1-0.20200618201807-9d82bf2a7266 h1:X0OqO02w1UgHOJsqDdWFGupKeIQtrgYoMYPaUMeo6WU= +github.com/mgugino-upstream-stage/controller-runtime v0.6.1-0.20200618201807-9d82bf2a7266/go.mod h1:qN/IYzFHXI7mP9qhUiGRN9uDH3fdAAqBTCqP1YkMEtQ= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= diff --git 
a/install/0000_30_machine-api-operator_08_webhook.yaml b/install/0000_30_machine-api-operator_08_webhook.yaml index 2f71c2fb85..e8caf1b0ad 100644 --- a/install/0000_30_machine-api-operator_08_webhook.yaml +++ b/install/0000_30_machine-api-operator_08_webhook.yaml @@ -28,6 +28,26 @@ webhooks: resources: - machines sideEffects: None + - clientConfig: + service: + name: machine-api-operator-webhook + namespace: openshift-machine-api + path: /mutate-machine-openshift-io-v1beta1-machineset + # failurePolicy is ignore so we don't want to block machine lifecycle on the webhook operational aspects. + # This would be particularly problematic for chicken egg issues when bootstrapping a cluster. + failurePolicy: Ignore + matchPolicy: Equivalent + name: default.machineset.machine.openshift.io + rules: + - apiGroups: + - machine.openshift.io + apiVersions: + - v1beta1 + operations: + - CREATE + resources: + - machinesets + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration @@ -59,3 +79,64 @@ webhooks: resources: - machines sideEffects: None + - clientConfig: + service: + name: machine-api-operator-webhook + namespace: openshift-machine-api + path: /validate-machine-openshift-io-v1beta1-machineset + # failurePolicy is ignore so we don't want to block machine lifecycle on the webhook operational aspects. + # This would be particularly problematic for chicken egg issues when bootstrapping a cluster. + failurePolicy: Ignore + matchPolicy: Equivalent + name: validation.machineset.machine.openshift.io + rules: + - apiGroups: + - machine.openshift.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - machinesets + sideEffects: None + - clientConfig: + service: + name: machine-api-operator-webhook + namespace: openshift-machine-api + path: /validate-machine-openshift-io-v1beta1-machineset-cp-delete + # failurePolicy is Fail so we ensure control plane machinesets are never + # deleted. 
+ failurePolicy: Fail + matchPolicy: Equivalent + name: delete.cp.validation.machineset.machine.openshift.io + rules: + - apiGroups: + - machine.openshift.io + apiVersions: + - v1beta1 + operations: + - DELETE + resources: + - machinesets + sideEffects: None + - clientConfig: + service: + name: machine-api-operator-webhook + namespace: openshift-machine-api + path: /validate-machine-openshift-io-v1beta1-machineset-cp-update + # failurePolicy is Fail so we ensure control plane machinesets are never + # made non-CP machinesets. + failurePolicy: Fail + matchPolicy: Equivalent + name: update.cp.validation.machineset.machine.openshift.io + rules: + - apiGroups: + - machine.openshift.io + apiVersions: + - v1beta1 + operations: + - UPDATE + resources: + - machinesets + sideEffects: None diff --git a/pkg/apis/machine/v1beta1/machine_webhook.go b/pkg/apis/machine/v1beta1/machine_webhook.go index 7edc742bb6..8fa8595f9a 100644 --- a/pkg/apis/machine/v1beta1/machine_webhook.go +++ b/pkg/apis/machine/v1beta1/machine_webhook.go @@ -119,29 +119,36 @@ func getInfra() (*osconfigv1.Infrastructure, error) { return infra, nil } -type handlerValidationFn func(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) -type handlerMutationFn func(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) +type machineAdmissionFn func(m *Machine, clusterID string) (bool, utilerrors.Aggregate) -// validatorHandler validates Machine API resources. -// implements type Handler interface. -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type validatorHandler struct { +type admissionHandler struct { clusterID string - webhookOperations handlerValidationFn + webhookOperations machineAdmissionFn decoder *admission.Decoder } -// defaulterHandler defaults Machine API resources. +// InjectDecoder injects the decoder. 
+func (a *admissionHandler) InjectDecoder(d *admission.Decoder) error { + a.decoder = d + return nil +} + +// machineValidatorHandler validates Machine API resources. // implements type Handler interface. // https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type defaulterHandler struct { - clusterID string - webhookOperations handlerMutationFn - decoder *admission.Decoder +type machineValidatorHandler struct { + *admissionHandler +} + +// machineDefaulterHandler defaults Machine API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineDefaulterHandler struct { + *admissionHandler } -// NewValidator returns a new validatorHandler. -func NewMachineValidator() (*validatorHandler, error) { +// NewValidator returns a new machineValidatorHandler. +func NewMachineValidator() (*machineValidatorHandler, error) { infra, err := getInfra() if err != nil { return nil, err @@ -150,29 +157,33 @@ func NewMachineValidator() (*validatorHandler, error) { return createMachineValidator(infra.Status.PlatformStatus.Type, infra.Status.InfrastructureName), nil } -func createMachineValidator(platform osconfigv1.PlatformType, clusterID string) *validatorHandler { - h := &validatorHandler{ - clusterID: clusterID, +func createMachineValidator(platform osconfigv1.PlatformType, clusterID string) *machineValidatorHandler { + return &machineValidatorHandler{ + admissionHandler: &admissionHandler{ + clusterID: clusterID, + webhookOperations: getMachineValidatorOperation(platform), + }, } +} +func getMachineValidatorOperation(platform osconfigv1.PlatformType) machineAdmissionFn { switch platform { case osconfigv1.AWSPlatformType: - h.webhookOperations = validateAWS + return validateAWS case osconfigv1.AzurePlatformType: - h.webhookOperations = validateAzure + return validateAzure case osconfigv1.GCPPlatformType: 
- h.webhookOperations = validateGCP + return validateGCP default: // just no-op - h.webhookOperations = func(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) { + return func(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { return true, nil } } - return h } -// NewDefaulter returns a new defaulterHandler. -func NewMachineDefaulter() (*defaulterHandler, error) { +// NewDefaulter returns a new machineDefaulterHandler. +func NewMachineDefaulter() (*machineDefaulterHandler, error) { infra, err := getInfra() if err != nil { return nil, err @@ -181,41 +192,37 @@ func NewMachineDefaulter() (*defaulterHandler, error) { return createMachineDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil } -func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *defaulterHandler { - h := &defaulterHandler{ - clusterID: clusterID, +func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineDefaulterHandler { + return &machineDefaulterHandler{ + admissionHandler: &admissionHandler{ + clusterID: clusterID, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, } +} +func getMachineDefaulterOperation(platformStatus *osconfigv1.PlatformStatus) machineAdmissionFn { switch platformStatus.Type { case osconfigv1.AWSPlatformType: - h.webhookOperations = defaultAWS + return defaultAWS case osconfigv1.AzurePlatformType: - h.webhookOperations = defaultAzure + return defaultAzure case osconfigv1.GCPPlatformType: - h.webhookOperations = gcpDefaulter{projectID: platformStatus.GCP.ProjectID}.defaultGCP + projectID := "" + if platformStatus.GCP != nil { + projectID = platformStatus.GCP.ProjectID + } + return gcpDefaulter{projectID: projectID}.defaultGCP default: // just no-op - h.webhookOperations = func(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { + return func(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { return true, nil } } - return 
h -} - -// InjectDecoder injects the decoder. -func (v *validatorHandler) InjectDecoder(d *admission.Decoder) error { - v.decoder = d - return nil -} - -// InjectDecoder injects the decoder. -func (v *defaulterHandler) InjectDecoder(d *admission.Decoder) error { - v.decoder = d - return nil } // Handle handles HTTP requests for admission webhook servers. -func (h *validatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { +func (h *machineValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { m := &Machine{} if err := h.decoder.Decode(req, m); err != nil { @@ -224,7 +231,7 @@ func (h *validatorHandler) Handle(ctx context.Context, req admission.Request) ad klog.V(3).Infof("Validate webhook called for Machine: %s", m.GetName()) - if ok, err := h.webhookOperations(h, m); !ok { + if ok, err := h.webhookOperations(m, h.clusterID); !ok { return admission.Denied(err.Error()) } @@ -232,7 +239,7 @@ func (h *validatorHandler) Handle(ctx context.Context, req admission.Request) ad } // Handle handles HTTP requests for admission webhook servers. 
-func (h *defaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { +func (h *machineDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { m := &Machine{} if err := h.decoder.Decode(req, m); err != nil { @@ -241,7 +248,7 @@ func (h *defaulterHandler) Handle(ctx context.Context, req admission.Request) ad klog.V(3).Infof("Mutate webhook called for Machine: %s", m.GetName()) - if ok, err := h.webhookOperations(h, m); !ok { + if ok, err := h.webhookOperations(m, h.clusterID); !ok { return admission.Denied(err.Error()) } @@ -252,7 +259,7 @@ func (h *defaulterHandler) Handle(ctx context.Context, req admission.Request) ad return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachine) } -func defaultAWS(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { +func defaultAWS(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting AWS providerSpec") var errs []error @@ -266,7 +273,7 @@ func defaultAWS(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { providerSpec.InstanceType = defaultAWSInstanceType } if providerSpec.IAMInstanceProfile == nil { - providerSpec.IAMInstanceProfile = &aws.AWSResourceReference{ID: defaultAWSIAMInstanceProfile(h.clusterID)} + providerSpec.IAMInstanceProfile = &aws.AWSResourceReference{ID: defaultAWSIAMInstanceProfile(clusterID)} } if providerSpec.UserDataSecret == nil { providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} @@ -282,7 +289,7 @@ func defaultAWS(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { Filters: []aws.Filter{ { Name: "tag:Name", - Values: []string{defaultAWSSecurityGroup(h.clusterID)}, + Values: []string{defaultAWSSecurityGroup(clusterID)}, }, }, }, @@ -293,7 +300,7 @@ func defaultAWS(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { providerSpec.Subnet.Filters = []aws.Filter{ { Name: "tag:Name", - Values: 
[]string{defaultAWSSubnet(h.clusterID, providerSpec.Placement.AvailabilityZone)}, + Values: []string{defaultAWSSubnet(clusterID, providerSpec.Placement.AvailabilityZone)}, }, } } @@ -312,13 +319,17 @@ func defaultAWS(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { } func unmarshalInto(m *Machine, providerSpec interface{}) error { + if m.Spec.ProviderSpec.Value == nil { + return field.Required(field.NewPath("providerSpec", "value"), "a value must be provided") + } + if err := yaml.Unmarshal(m.Spec.ProviderSpec.Value.Raw, &providerSpec); err != nil { return field.Invalid(field.NewPath("providerSpec", "value"), providerSpec, err.Error()) } return nil } -func validateAWS(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) { +func validateAWS(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Validating AWS providerSpec") var errs []error @@ -407,7 +418,7 @@ func validateAWS(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) { return true, nil } -func defaultAzure(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { +func defaultAzure(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting Azure providerSpec") var errs []error @@ -423,26 +434,26 @@ func defaultAzure(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) // Vnet and Subnet need to be provided together by the user if providerSpec.Vnet == "" && providerSpec.Subnet == "" { - providerSpec.Vnet = defaultAzureVnet(h.clusterID) - providerSpec.Subnet = defaultAzureSubnet(h.clusterID) + providerSpec.Vnet = defaultAzureVnet(clusterID) + providerSpec.Subnet = defaultAzureSubnet(clusterID) // NetworkResourceGroup can be set by the user without Vnet and Subnet, // only override if they didn't set it if providerSpec.NetworkResourceGroup == "" { - providerSpec.NetworkResourceGroup = defaultAzureNetworkResourceGroup(h.clusterID) + providerSpec.NetworkResourceGroup = 
defaultAzureNetworkResourceGroup(clusterID) } } if providerSpec.Image.ResourceID == "" { - providerSpec.Image.ResourceID = defaultAzureImageResourceID(h.clusterID) + providerSpec.Image.ResourceID = defaultAzureImageResourceID(clusterID) } if providerSpec.ManagedIdentity == "" { - providerSpec.ManagedIdentity = defaultAzureManagedIdentiy(h.clusterID) + providerSpec.ManagedIdentity = defaultAzureManagedIdentiy(clusterID) } if providerSpec.ResourceGroup == "" { - providerSpec.ResourceGroup = defaultAzureResourceGroup(h.clusterID) + providerSpec.ResourceGroup = defaultAzureResourceGroup(clusterID) } if providerSpec.UserDataSecret == nil { @@ -483,7 +494,7 @@ func defaultAzure(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) return true, nil } -func validateAzure(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) { +func validateAzure(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Validating Azure providerSpec") var errs []error @@ -566,7 +577,7 @@ type gcpDefaulter struct { projectID string } -func (g gcpDefaulter) defaultGCP(h *defaulterHandler, m *Machine) (bool, utilerrors.Aggregate) { +func (g gcpDefaulter) defaultGCP(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Defaulting GCP providerSpec") var errs []error @@ -582,15 +593,15 @@ func (g gcpDefaulter) defaultGCP(h *defaulterHandler, m *Machine) (bool, utilerr if len(providerSpec.NetworkInterfaces) == 0 { providerSpec.NetworkInterfaces = append(providerSpec.NetworkInterfaces, &gcp.GCPNetworkInterface{ - Network: defaultGCPNetwork(h.clusterID), - Subnetwork: defaultGCPSubnetwork(h.clusterID), + Network: defaultGCPNetwork(clusterID), + Subnetwork: defaultGCPSubnetwork(clusterID), }) } - providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, h.clusterID) + providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, clusterID) if len(providerSpec.Tags) == 0 { - providerSpec.Tags = defaultGCPTags(h.clusterID) + providerSpec.Tags = 
defaultGCPTags(clusterID) } if providerSpec.UserDataSecret == nil { @@ -602,7 +613,7 @@ func (g gcpDefaulter) defaultGCP(h *defaulterHandler, m *Machine) (bool, utilerr } if len(providerSpec.ServiceAccounts) == 0 { - providerSpec.ServiceAccounts = defaultGCPServiceAccounts(h.clusterID, g.projectID) + providerSpec.ServiceAccounts = defaultGCPServiceAccounts(clusterID, g.projectID) } rawBytes, err := json.Marshal(providerSpec) @@ -644,7 +655,7 @@ func defaultGCPDisks(disks []*gcp.GCPDisk, clusterID string) []*gcp.GCPDisk { return disks } -func validateGCP(h *validatorHandler, m *Machine) (bool, utilerrors.Aggregate) { +func validateGCP(m *Machine, clusterID string) (bool, utilerrors.Aggregate) { klog.V(3).Infof("Validating GCP providerSpec") var errs []error diff --git a/pkg/apis/machine/v1beta1/machine_webhook_test.go b/pkg/apis/machine/v1beta1/machine_webhook_test.go index f476279039..c36ca98bc3 100644 --- a/pkg/apis/machine/v1beta1/machine_webhook_test.go +++ b/pkg/apis/machine/v1beta1/machine_webhook_test.go @@ -2,19 +2,614 @@ package v1beta1 import ( "encoding/json" + "fmt" "testing" + . 
"github.com/onsi/gomega" osconfigv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" "k8s.io/utils/pointer" aws "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" gcp "sigs.k8s.io/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" yaml "sigs.k8s.io/yaml" ) +func TestMachineCreation(t *testing.T) { + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-creation-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + platformType osconfigv1.PlatformType + clusterID string + expectedError string + providerSpecValue *runtime.RawExtension + }{ + { + name: "with AWS and a nil provider spec value", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with AWS and no fields set", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &aws.AWSMachineProviderConfig{}, + }, + expectedError: "providerSpec.ami: Required value: expected either providerSpec.ami.arn or providerSpec.ami.filters or 
providerSpec.ami.id to be populated", + }, + { + name: "with AWS and an AMI ID set", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &aws.AWSMachineProviderConfig{ + AMI: aws.AWSResourceReference{ + ID: pointer.StringPtr("ami"), + }, + }, + }, + expectedError: "", + }, + { + name: "with Azure and a nil provider spec value", + platformType: osconfigv1.AzurePlatformType, + clusterID: "azure-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with Azure and no fields set", + platformType: osconfigv1.AzurePlatformType, + clusterID: "azure-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &azure.AzureMachineProviderSpec{}, + }, + expectedError: "[providerSpec.location: Required value: location should be set to one of the supported Azure regions, providerSpec.osDisk.diskSizeGB: Invalid value: 0: diskSizeGB must be greater than zero]", + }, + { + name: "with Azure and a location and disk size set", + platformType: osconfigv1.AzurePlatformType, + clusterID: "azure-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &azure.AzureMachineProviderSpec{ + Location: "location", + OSDisk: azure.OSDisk{ + DiskSizeGB: 128, + }, + }, + }, + expectedError: "", + }, + { + name: "with GCP and a nil provider spec value", + platformType: osconfigv1.GCPPlatformType, + clusterID: "gcp-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with GCP and no fields set", + platformType: osconfigv1.GCPPlatformType, + clusterID: "gcp-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &gcp.GCPMachineProviderSpec{}, + }, + expectedError: "providerSpec.region: Required value: region is required", + }, + { + name: "with GCP and the region and zone set", + platformType: osconfigv1.GCPPlatformType, + clusterID: "gcp-cluster", + 
providerSpecValue: &runtime.RawExtension{ + Object: &gcp.GCPMachineProviderSpec{ + Region: "region", + Zone: "region-zone", + }, + }, + expectedError: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + platformStatus := &osconfigv1.PlatformStatus{ + Type: tc.platformType, + GCP: &osconfigv1.GCPPlatformStatus{ + ProjectID: "gcp-project-id", + }, + } + + machineDefaulter := createMachineDefaulter(platformStatus, tc.clusterID) + machineValidator := createMachineValidator(platformStatus.Type, tc.clusterID) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineValidator}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + gs.Eventually(func() (bool, error) { + resp, err := insecureHTTPClient.Get(fmt.Sprintf("https://127.0.0.1:%d", testEnv.WebhookInstallOptions.LocalServingPort)) + if err != nil { + return false, err + } + return resp.StatusCode == 404, nil + }).Should(BeTrue()) + + m := &Machine{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machine-creation-", + Namespace: namespace.Name, + }, + Spec: MachineSpec{ + ProviderSpec: ProviderSpec{ + Value: tc.providerSpecValue, + }, + }, + } + err = c.Create(ctx, m) + if err == nil { + defer func() { + gs.Expect(c.Delete(ctx, m)).To(Succeed()) + }() + } + + if tc.expectedError != "" { + gs.Expect(err).ToNot(BeNil()) + 
gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + +func TestMachineUpdate(t *testing.T) { + awsClusterID := "aws-cluster" + defaultAWSProviderSpec := &aws.AWSMachineProviderConfig{ + AMI: aws.AWSResourceReference{ + ID: pointer.StringPtr("ami"), + }, + InstanceType: defaultAWSInstanceType, + IAMInstanceProfile: &aws.AWSResourceReference{ + ID: defaultAWSIAMInstanceProfile(awsClusterID), + }, + UserDataSecret: &corev1.LocalObjectReference{Name: defaultUserDataSecret}, + CredentialsSecret: &corev1.LocalObjectReference{Name: defaultAWSCredentialsSecret}, + SecurityGroups: []aws.AWSResourceReference{ + { + Filters: []aws.Filter{ + { + Name: "tag:Name", + Values: []string{defaultAWSSecurityGroup(awsClusterID)}, + }, + }, + }, + }, + Placement: aws.Placement{ + Region: "region", + AvailabilityZone: "zone", + }, + Subnet: aws.AWSResourceReference{ + Filters: []aws.Filter{ + { + Name: "tag:Name", + Values: []string{defaultAWSSubnet(awsClusterID, "zone")}, + }, + }, + }, + } + + azureClusterID := "azure-cluster" + defaultAzureProviderSpec := &azure.AzureMachineProviderSpec{ + Location: "location", + VMSize: defaultAzureVMSize, + Vnet: defaultAzureVnet(azureClusterID), + Subnet: defaultAzureSubnet(azureClusterID), + NetworkResourceGroup: defaultAzureNetworkResourceGroup(azureClusterID), + Image: azure.Image{ + ResourceID: defaultAzureImageResourceID(azureClusterID), + }, + ManagedIdentity: defaultAzureManagedIdentiy(azureClusterID), + ResourceGroup: defaultAzureResourceGroup(azureClusterID), + UserDataSecret: &corev1.SecretReference{ + Name: defaultUserDataSecret, + Namespace: defaultSecretNamespace, + }, + CredentialsSecret: &corev1.SecretReference{ + Name: defaultAzureCredentialsSecret, + Namespace: defaultSecretNamespace, + }, + OSDisk: azure.OSDisk{ + DiskSizeGB: 128, + OSType: defaultAzureOSDiskOSType, + ManagedDisk: azure.ManagedDisk{ + StorageAccountType: 
defaultAzureOSDiskStorageType, + }, + }, + } + + gcpClusterID := "gcp-cluster" + gcpProjectID := "gcp-project-id" + defaultGCPProviderSpec := &gcp.GCPMachineProviderSpec{ + Region: "region", + Zone: "region-zone", + MachineType: defaultGCPMachineType, + NetworkInterfaces: []*gcp.GCPNetworkInterface{ + { + Network: defaultGCPNetwork(gcpClusterID), + Subnetwork: defaultGCPSubnetwork(gcpClusterID), + }, + }, + Disks: []*gcp.GCPDisk{ + { + AutoDelete: true, + Boot: true, + SizeGb: defaultGCPDiskSizeGb, + Type: defaultGCPDiskType, + Image: defaultGCPDiskImage(gcpClusterID), + }, + }, + ServiceAccounts: defaultGCPServiceAccounts(gcpClusterID, gcpProjectID), + Tags: defaultGCPTags(gcpClusterID), + UserDataSecret: &corev1.LocalObjectReference{ + Name: defaultUserDataSecret, + }, + CredentialsSecret: &corev1.LocalObjectReference{ + Name: defaultGCPCredentialsSecret, + }, + } + + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-update-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + platformType osconfigv1.PlatformType + clusterID string + expectedError string + baseProviderSpecValue *runtime.RawExtension + updatedProviderSpecValue func() *runtime.RawExtension + }{ + { + name: "with a valid AWS ProviderSpec", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + } + }, + expectedError: "", + }, + { + name: "with an AWS ProviderSpec, removing the instance type", + platformType: 
osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.InstanceType = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.instanceType: Required value: expected providerSpec.instanceType to be populated", + }, + { + name: "with an AWS ProviderSpec, removing the instance profile", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.IAMInstanceProfile = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.iamInstanceProfile: Required value: expected providerSpec.iamInstanceProfile to be populated", + }, + { + name: "with an AWS ProviderSpec, removing the user data secret", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.UserDataSecret = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.userDataSecret: Required value: expected providerSpec.userDataSecret to be populated", + }, + { + name: "with a valid Azure ProviderSpec", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + } + }, + expectedError: "", + }, + { + name: 
"with an Azure ProviderSpec, removing the vm size", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.VMSize = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.vmSize: Required value: vmSize should be set to one of the supported Azure VM sizes", + }, + { + name: "with an Azure ProviderSpec, removing the subnet", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.Subnet = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.subnet: Required value: must provide a subnet when a virtual network is specified", + }, + { + name: "with an Azure ProviderSpec, removing the credentials secret", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.CredentialsSecret = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.credentialsSecret: Required value: credentialsSecret must be provided", + }, + { + name: "with a valid GCP ProviderSpec", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + } + }, + 
expectedError: "", + }, + { + name: "with a GCP ProviderSpec, removing the region", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Region = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.region: Required value: region is required", + }, + { + name: "with a GCP ProviderSpec, and an invalid region", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Zone = "zone" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.zone: Invalid value: \"zone\": zone not in configured region (region)", + }, + { + name: "with a GCP ProviderSpec, removing the disks", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Disks = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.disks: Required value: at least 1 disk is required", + }, + { + name: "with a GCP ProviderSpec, removing the service accounts", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.ServiceAccounts = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: 
"providerSpec.serviceAccounts: Invalid value: \"0 service accounts supplied\": exactly 1 service account must be supplied", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + platformStatus := &osconfigv1.PlatformStatus{ + Type: tc.platformType, + GCP: &osconfigv1.GCPPlatformStatus{ + ProjectID: gcpProjectID, + }, + } + + machineDefaulter := createMachineDefaulter(platformStatus, tc.clusterID) + machineValidator := createMachineValidator(platformStatus.Type, tc.clusterID) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machine", &webhook.Admission{Handler: machineValidator}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + gs.Eventually(func() (bool, error) { + resp, err := insecureHTTPClient.Get(fmt.Sprintf("https://127.0.0.1:%d", testEnv.WebhookInstallOptions.LocalServingPort)) + if err != nil { + return false, err + } + return resp.StatusCode == 404, nil + }).Should(BeTrue()) + + m := &Machine{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machine-creation-", + Namespace: namespace.Name, + }, + Spec: MachineSpec{ + ProviderSpec: ProviderSpec{ + Value: tc.baseProviderSpecValue, + }, + }, + } + err = c.Create(ctx, m) + gs.Expect(err).ToNot(HaveOccurred()) + defer func() { + gs.Expect(c.Delete(ctx, m)).To(Succeed()) + }() + + m.Spec.ProviderSpec.Value = tc.updatedProviderSpecValue() + err = c.Update(ctx, m) + if tc.expectedError != "" { + gs.Expect(err).ToNot(BeNil()) 
+ gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + func TestValidateAWSProviderSpec(t *testing.T) { testCases := []struct { @@ -124,7 +719,7 @@ func TestValidateAWSProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } @@ -215,7 +810,7 @@ func TestDefaultAWSProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } @@ -446,7 +1041,7 @@ func TestValidateAzureProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } @@ -563,7 +1158,7 @@ func TestDefaultAzureProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } @@ -832,7 +1427,7 @@ func TestValidateGCPProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } @@ -941,7 +1536,7 @@ func TestDefaultGCPProviderSpec(t *testing.T) { } m.Spec.ProviderSpec.Value = &runtime.RawExtension{Raw: rawBytes} - ok, err := h.webhookOperations(h, m) + ok, err := h.webhookOperations(m, h.clusterID) if ok != tc.expectedOk { 
t.Errorf("expected: %v, got: %v", tc.expectedOk, ok) } diff --git a/pkg/apis/machine/v1beta1/machineset_cp_webhook.go b/pkg/apis/machine/v1beta1/machineset_cp_webhook.go new file mode 100644 index 0000000000..28b1324033 --- /dev/null +++ b/pkg/apis/machine/v1beta1/machineset_cp_webhook.go @@ -0,0 +1,84 @@ +package v1beta1 + +import ( + "context" + "fmt" + "net/http" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// MachineSetCPHandler validates ControlPlane MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type MachineSetCPHandler struct { + decoder *admission.Decoder +} + +// NewMachineSetCPValidator returns a new MachineSetCPHandler. +func NewMachineSetCPValidator() *MachineSetCPHandler { + return createMachineSetCPValidator() +} + +func createMachineSetCPValidator() *MachineSetCPHandler { + return &MachineSetCPHandler{} +} + +// InjectDecoder injects the decoder. +func (v *MachineSetCPHandler) InjectDecoder(d *admission.Decoder) error { + v.decoder = d + return nil +} + +// Handle handles HTTP requests for admission webhook servers. +func (v *MachineSetCPHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + oldMS := &MachineSet{} + + // Delete requests, the req.Object is empty. + if err := v.decoder.DecodeRaw(req.OldObject, oldMS); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Validate webhook called for CP MachineSets: %s", oldMS.GetName()) + + newMS := &MachineSet{} + if req.Operation != admissionv1beta1.Delete { + if err := v.decoder.Decode(req, newMS); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + } + + // Succeed if deleting non-CP MachineSet or Updating a non-CP MachineSet + // and the user is not attempting to change it to a CP MachineSet. 
+ if !isCPMS(oldMS) && (req.Operation == admissionv1beta1.Delete || !isCPMS(newMS)) { + return admission.Allowed("MachineSet is Not Control Plane.") + } + + // If the user is updating a CP MachineSet, as long as machine role is + // unchanged, we're ok. + if req.Operation != admissionv1beta1.Delete && isCPMS(newMS) && isCPMS(oldMS) { + return admission.Allowed("Control Plane MachineSet is Valid.") + } + + // User is peforming an unallowed operation + + // TODO(michaelgugino): Ensure we account for MachineDeployment ownership + // of a CP machineset in the future if we use them. + return admission.Denied(fmt.Sprintf("Requested %v of Control Plane MachineSet Not Allowed.", req.Operation)) + +} + +func isCPMS(ms *MachineSet) bool { + if ms.Spec.Template.ObjectMeta.Labels == nil { + return false + } + val, ok := ms.Spec.Template.ObjectMeta.Labels["machine.openshift.io/cluster-api-machine-role"] + if ok { + if val == "master" { + return true + } + } + return false +} diff --git a/pkg/apis/machine/v1beta1/machineset_types_test.go b/pkg/apis/machine/v1beta1/machineset_types_test.go index 53d6f3eca5..26260a9d51 100644 --- a/pkg/apis/machine/v1beta1/machineset_types_test.go +++ b/pkg/apis/machine/v1beta1/machineset_types_test.go @@ -32,12 +32,39 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) func TestStorageMachineSet(t *testing.T) { key := types.NamespacedName{Name: "foo", Namespace: "default"} created := &MachineSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + + gs.Expect(err).ToNot(HaveOccurred()) + + 
mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + // Test Create fetched := &MachineSet{} if err := c.Create(context.TODO(), created); err != nil { diff --git a/pkg/apis/machine/v1beta1/machineset_webhook.go b/pkg/apis/machine/v1beta1/machineset_webhook.go new file mode 100644 index 0000000000..595f406028 --- /dev/null +++ b/pkg/apis/machine/v1beta1/machineset_webhook.go @@ -0,0 +1,135 @@ +package v1beta1 + +import ( + "context" + "encoding/json" + "net/http" + + osconfigv1 "github.com/openshift/api/config/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// machineSetValidatorHandler validates MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetValidatorHandler struct { + *admissionHandler +} + +// machineSetDefaulterHandler defaults MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetDefaulterHandler struct { + *admissionHandler +} + +// NewMachineSetValidator returns a new machineSetValidatorHandler. 
+func NewMachineSetValidator() (*machineSetValidatorHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineSetValidator(infra.Status.PlatformStatus.Type, infra.Status.InfrastructureName), nil +} + +func createMachineSetValidator(platform osconfigv1.PlatformType, clusterID string) *machineSetValidatorHandler { + return &machineSetValidatorHandler{ + admissionHandler: &admissionHandler{ + clusterID: clusterID, + webhookOperations: getMachineValidatorOperation(platform), + }, + } +} + +// NewMachineSetDefaulter returns a new machineSetDefaulterHandler. +func NewMachineSetDefaulter() (*machineSetDefaulterHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineSetDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil +} + +func createMachineSetDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineSetDefaulterHandler { + return &machineSetDefaulterHandler{ + admissionHandler: &admissionHandler{ + clusterID: clusterID, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, + } +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *machineSetValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Validate webhook called for MachineSet: %s", ms.GetName()) + + if ok, err := h.validateMachineSet(ms); !ok { + return admission.Denied(err.Error()) + } + + return admission.Allowed("MachineSet valid") +} + +// Handle handles HTTP requests for admission webhook servers. 
+func (h *machineSetDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Mutate webhook called for MachineSet: %s", ms.GetName()) + + if ok, err := h.defaultMachineSet(ms); !ok { + return admission.Denied(err.Error()) + } + + marshaledMachineSet, err := json.Marshal(ms) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachineSet) +} + +func (h *machineSetValidatorHandler) validateMachineSet(ms *MachineSet) (bool, utilerrors.Aggregate) { + var errs []error + + // Create a Machine from the MachineSet and validate the Machine template + m := &Machine{Spec: ms.Spec.Template.Spec} + if ok, err := h.webhookOperations(m, h.clusterID); !ok { + errs = append(errs, err.Errors()...) + } + + if len(errs) > 0 { + return false, utilerrors.NewAggregate(errs) + } + return true, nil +} + +func (h *machineSetDefaulterHandler) defaultMachineSet(ms *MachineSet) (bool, utilerrors.Aggregate) { + var errs []error + + // Create a Machine from the MachineSet and default the Machine template + m := &Machine{Spec: ms.Spec.Template.Spec} + if ok, err := h.webhookOperations(m, h.clusterID); !ok { + errs = append(errs, err.Errors()...) + } else { + // Restore the defaulted template + ms.Spec.Template.Spec = m.Spec + } + + if len(errs) > 0 { + return false, utilerrors.NewAggregate(errs) + } + return true, nil +} diff --git a/pkg/apis/machine/v1beta1/machineset_webhook_test.go b/pkg/apis/machine/v1beta1/machineset_webhook_test.go new file mode 100644 index 0000000000..eb8d3d9969 --- /dev/null +++ b/pkg/apis/machine/v1beta1/machineset_webhook_test.go @@ -0,0 +1,945 @@ +package v1beta1 + +import ( + "context" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + osconfigv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/utils/pointer" + aws "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" + azure "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" + gcp "sigs.k8s.io/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func TestMachineSetCreation(t *testing.T) { + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineset-creation-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + platformType osconfigv1.PlatformType + clusterID string + expectedError string + providerSpecValue *runtime.RawExtension + }{ + { + name: "with AWS and a nil provider spec value", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with AWS and no fields set", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &aws.AWSMachineProviderConfig{}, + }, + expectedError: "providerSpec.ami: Required value: expected either providerSpec.ami.arn or providerSpec.ami.filters 
or providerSpec.ami.id to be populated", + }, + { + name: "with AWS and an AMI ID set", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &aws.AWSMachineProviderConfig{ + AMI: aws.AWSResourceReference{ + ID: pointer.StringPtr("ami"), + }, + }, + }, + expectedError: "", + }, + { + name: "with Azure and a nil provider spec value", + platformType: osconfigv1.AWSPlatformType, + clusterID: "azure-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with Azure and no fields set", + platformType: osconfigv1.AzurePlatformType, + clusterID: "azure-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &azure.AzureMachineProviderSpec{}, + }, + expectedError: "[providerSpec.location: Required value: location should be set to one of the supported Azure regions, providerSpec.osDisk.diskSizeGB: Invalid value: 0: diskSizeGB must be greater than zero]", + }, + { + name: "with Azure and a location and disk size set", + platformType: osconfigv1.AzurePlatformType, + clusterID: "azure-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &azure.AzureMachineProviderSpec{ + Location: "location", + OSDisk: azure.OSDisk{ + DiskSizeGB: 128, + }, + }, + }, + expectedError: "", + }, + { + name: "with GCP and a nil provider spec value", + platformType: osconfigv1.AWSPlatformType, + clusterID: "gcp-cluster", + providerSpecValue: nil, + expectedError: "providerSpec.value: Required value: a value must be provided", + }, + { + name: "with GCP and no fields set", + platformType: osconfigv1.GCPPlatformType, + clusterID: "gcp-cluster", + providerSpecValue: &runtime.RawExtension{ + Object: &gcp.GCPMachineProviderSpec{}, + }, + expectedError: "providerSpec.region: Required value: region is required", + }, + { + name: "with GCP and the region and zone set", + platformType: osconfigv1.GCPPlatformType, + clusterID: "gcp-cluster", 
+ providerSpecValue: &runtime.RawExtension{ + Object: &gcp.GCPMachineProviderSpec{ + Region: "region", + Zone: "region-zone", + }, + }, + expectedError: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + platformStatus := &osconfigv1.PlatformStatus{ + Type: tc.platformType, + GCP: &osconfigv1.GCPPlatformStatus{ + ProjectID: "gcp-project-id", + }, + } + + machineSetDefaulter := createMachineSetDefaulter(platformStatus, tc.clusterID) + machineSetValidator := createMachineSetValidator(platformStatus.Type, tc.clusterID) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetValidator}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + gs.Eventually(func() (bool, error) { + resp, err := insecureHTTPClient.Get(fmt.Sprintf("https://127.0.0.1:%d", testEnv.WebhookInstallOptions.LocalServingPort)) + if err != nil { + return false, err + } + return resp.StatusCode == 404, nil + }).Should(BeTrue()) + + ms := &MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machineset-creation-", + Namespace: 
namespace.Name, + }, + Spec: MachineSetSpec{ + Template: MachineTemplateSpec{ + Spec: MachineSpec{ + ProviderSpec: ProviderSpec{ + Value: tc.providerSpecValue, + }, + }, + }, + }, + } + err = c.Create(ctx, ms) + if err == nil { + defer func() { + gs.Expect(c.Delete(ctx, ms)).To(Succeed()) + }() + } + + if tc.expectedError != "" { + gs.Expect(err).ToNot(BeNil()) + gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + +func TestMachineSetUpdate(t *testing.T) { + awsClusterID := "aws-cluster" + defaultAWSProviderSpec := &aws.AWSMachineProviderConfig{ + AMI: aws.AWSResourceReference{ + ID: pointer.StringPtr("ami"), + }, + InstanceType: defaultAWSInstanceType, + IAMInstanceProfile: &aws.AWSResourceReference{ + ID: defaultAWSIAMInstanceProfile(awsClusterID), + }, + UserDataSecret: &corev1.LocalObjectReference{Name: defaultUserDataSecret}, + CredentialsSecret: &corev1.LocalObjectReference{Name: defaultAWSCredentialsSecret}, + SecurityGroups: []aws.AWSResourceReference{ + { + Filters: []aws.Filter{ + { + Name: "tag:Name", + Values: []string{defaultAWSSecurityGroup(awsClusterID)}, + }, + }, + }, + }, + Placement: aws.Placement{ + Region: "region", + AvailabilityZone: "zone", + }, + Subnet: aws.AWSResourceReference{ + Filters: []aws.Filter{ + { + Name: "tag:Name", + Values: []string{defaultAWSSubnet(awsClusterID, "zone")}, + }, + }, + }, + } + + azureClusterID := "azure-cluster" + defaultAzureProviderSpec := &azure.AzureMachineProviderSpec{ + Location: "location", + VMSize: defaultAzureVMSize, + Vnet: defaultAzureVnet(azureClusterID), + Subnet: defaultAzureSubnet(azureClusterID), + NetworkResourceGroup: defaultAzureNetworkResourceGroup(azureClusterID), + Image: azure.Image{ + ResourceID: defaultAzureImageResourceID(azureClusterID), + }, + ManagedIdentity: defaultAzureManagedIdentiy(azureClusterID), + ResourceGroup: defaultAzureResourceGroup(azureClusterID), + UserDataSecret: 
&corev1.SecretReference{ + Name: defaultUserDataSecret, + Namespace: defaultSecretNamespace, + }, + CredentialsSecret: &corev1.SecretReference{ + Name: defaultAzureCredentialsSecret, + Namespace: defaultSecretNamespace, + }, + OSDisk: azure.OSDisk{ + DiskSizeGB: 128, + OSType: defaultAzureOSDiskOSType, + ManagedDisk: azure.ManagedDisk{ + StorageAccountType: defaultAzureOSDiskStorageType, + }, + }, + } + + gcpClusterID := "gcp-cluster" + gcpProjectID := "gcp-project-id" + defaultGCPProviderSpec := &gcp.GCPMachineProviderSpec{ + Region: "region", + Zone: "region-zone", + MachineType: defaultGCPMachineType, + NetworkInterfaces: []*gcp.GCPNetworkInterface{ + { + Network: defaultGCPNetwork(gcpClusterID), + Subnetwork: defaultGCPSubnetwork(gcpClusterID), + }, + }, + Disks: []*gcp.GCPDisk{ + { + AutoDelete: true, + Boot: true, + SizeGb: defaultGCPDiskSizeGb, + Type: defaultGCPDiskType, + Image: defaultGCPDiskImage(gcpClusterID), + }, + }, + ServiceAccounts: defaultGCPServiceAccounts(gcpClusterID, gcpProjectID), + Tags: defaultGCPTags(gcpClusterID), + UserDataSecret: &corev1.LocalObjectReference{ + Name: defaultUserDataSecret, + }, + CredentialsSecret: &corev1.LocalObjectReference{ + Name: defaultGCPCredentialsSecret, + }, + } + + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineset-update-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + platformType osconfigv1.PlatformType + clusterID string + expectedError string + baseProviderSpecValue *runtime.RawExtension + updatedProviderSpecValue func() *runtime.RawExtension + }{ + { + name: "with a valid AWS ProviderSpec", + platformType: osconfigv1.AWSPlatformType, + clusterID: 
awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + } + }, + expectedError: "", + }, + { + name: "with an AWS ProviderSpec, removing the instance type", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.InstanceType = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.instanceType: Required value: expected providerSpec.instanceType to be populated", + }, + { + name: "with an AWS ProviderSpec, removing the instance profile", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.IAMInstanceProfile = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.iamInstanceProfile: Required value: expected providerSpec.iamInstanceProfile to be populated", + }, + { + name: "with an AWS ProviderSpec, removing the user data secret", + platformType: osconfigv1.AWSPlatformType, + clusterID: awsClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAWSProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAWSProviderSpec.DeepCopy() + object.UserDataSecret = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.userDataSecret: Required value: expected providerSpec.userDataSecret to be populated", + }, + { + name: "with a valid Azure 
ProviderSpec", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + } + }, + expectedError: "", + }, + { + name: "with an Azure ProviderSpec, removing the vm size", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.VMSize = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.vmSize: Required value: vmSize should be set to one of the supported Azure VM sizes", + }, + { + name: "with an Azure ProviderSpec, removing the subnet", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.Subnet = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.subnet: Required value: must provide a subnet when a virtual network is specified", + }, + { + name: "with an Azure ProviderSpec, removing the credentials secret", + platformType: osconfigv1.AzurePlatformType, + clusterID: azureClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultAzureProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultAzureProviderSpec.DeepCopy() + object.CredentialsSecret = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.credentialsSecret: Required value: credentialsSecret must be 
provided", + }, + { + name: "with a valid GCP ProviderSpec", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + return &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + } + }, + expectedError: "", + }, + { + name: "with a GCP ProviderSpec, removing the region", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Region = "" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.region: Required value: region is required", + }, + { + name: "with a GCP ProviderSpec, and an invalid region", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Zone = "zone" + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.zone: Invalid value: \"zone\": zone not in configured region (region)", + }, + { + name: "with a GCP ProviderSpec, removing the disks", + platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.Disks = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.disks: Required value: at least 1 disk is required", + }, + { + name: "with a GCP ProviderSpec, removing the service accounts", + 
platformType: osconfigv1.GCPPlatformType, + clusterID: gcpClusterID, + baseProviderSpecValue: &runtime.RawExtension{ + Object: defaultGCPProviderSpec.DeepCopy(), + }, + updatedProviderSpecValue: func() *runtime.RawExtension { + object := defaultGCPProviderSpec.DeepCopy() + object.ServiceAccounts = nil + return &runtime.RawExtension{ + Object: object, + } + }, + expectedError: "providerSpec.serviceAccounts: Invalid value: \"0 service accounts supplied\": exactly 1 service account must be supplied", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + platformStatus := &osconfigv1.PlatformStatus{ + Type: tc.platformType, + GCP: &osconfigv1.GCPPlatformStatus{ + ProjectID: gcpProjectID, + }, + } + + machineSetDefaulter := createMachineSetDefaulter(platformStatus, tc.clusterID) + machineSetValidator := createMachineSetValidator(platformStatus.Type, tc.clusterID) + mgr.GetWebhookServer().Register("/mutate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetDefaulter}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset", &webhook.Admission{Handler: machineSetValidator}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + gs.Eventually(func() (bool, error) { + 
resp, err := insecureHTTPClient.Get(fmt.Sprintf("https://127.0.0.1:%d", testEnv.WebhookInstallOptions.LocalServingPort)) + if err != nil { + return false, err + } + return resp.StatusCode == 404, nil + }).Should(BeTrue()) + + ms := &MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machineset-update-", + Namespace: namespace.Name, + }, + Spec: MachineSetSpec{ + Template: MachineTemplateSpec{ + Spec: MachineSpec{ + ProviderSpec: ProviderSpec{ + Value: tc.baseProviderSpecValue, + }, + }, + }, + }, + } + err = c.Create(ctx, ms) + gs.Expect(err).ToNot(HaveOccurred()) + defer func() { + gs.Expect(c.Delete(ctx, ms)).To(Succeed()) + }() + + ms.Spec.Template.Spec.ProviderSpec.Value = tc.updatedProviderSpecValue() + err = c.Update(ctx, ms) + if tc.expectedError != "" { + gs.Expect(err).ToNot(BeNil()) + gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + +func TestCPMachineSetDelete(t *testing.T) { + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineset-cp-delete-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + expectedError string + objectMeta ObjectMeta + }{ + { + name: "is not CP MachineSet", + expectedError: "", + objectMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + }, + { + name: "is not CP MachineSet, no labels", + expectedError: "", + objectMeta: ObjectMeta{}, + }, + { + name: "is CP MachineSet", + expectedError: "Requested DELETE of Control Plane MachineSet Not Allowed.", + objectMeta: ObjectMeta{ + Labels: map[string]string{ + 
"machine.openshift.io/cluster-api-machine-role": "master", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + machineSetCPDeletionValidator := NewMachineSetCPValidator() + gs.Expect(err).ToNot(HaveOccurred()) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: machineSetCPDeletionValidator}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + ms := &MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machineset-cp-deletion-", + Namespace: namespace.Name, + }, + Spec: MachineSetSpec{ + Template: MachineTemplateSpec{ + Spec: MachineSpec{}, + }, + }, + } + + ms.Spec.Template.ObjectMeta = tc.objectMeta + gs.Expect(c.Create(ctx, ms)).To(Succeed()) + + err = c.Delete(ctx, ms) + + if tc.expectedError != "" { + defer func() { + ms.Spec.Template.ObjectMeta.Labels["machine.openshift.io/cluster-api-machine-role"] = "worker" + gs.Expect(c.Update(ctx, ms)).To(Succeed()) + gs.Expect(c.Delete(ctx, ms)).To(Succeed()) + }() + gs.Expect(err).ToNot(BeNil()) + gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + +func TestCPMachineSetUpdate(t *testing.T) { + g := NewWithT(t) + + // Override config getter + ctrl.GetConfig = func() (*rest.Config, error) { + return cfg, nil + } + defer func() { + ctrl.GetConfig = 
config.GetConfig + }() + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineset-cp-update-test", + }, + } + g.Expect(c.Create(ctx, namespace)).To(Succeed()) + defer func() { + g.Expect(c.Delete(ctx, namespace)).To(Succeed()) + }() + + testCases := []struct { + name string + expectedError string + originalMeta ObjectMeta + updateMeta ObjectMeta + }{ + { + name: "is not CP MachineSet, not becoming CP", + expectedError: "", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + updateMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker2", + }, + }, + }, + { + name: "is not CP MachineSet, labels removed", + expectedError: "", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + updateMeta: ObjectMeta{ + Labels: nil, + }, + }, + { + name: "no Lables (non-CP) to non-CP labels", + expectedError: "", + originalMeta: ObjectMeta{ + Labels: nil, + }, + updateMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + }, + { + name: "CP MachineSet, add another label", + expectedError: "", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "master", + }, + }, + updateMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "master", + "secondlabel": "second label value", + }, + }, + }, + { + name: "CP MachineSet, try to change role", + expectedError: "Requested UPDATE of Control Plane MachineSet Not Allowed.", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "master", + }, + }, + updateMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + }, + { + name: "CP 
MachineSet, try to remove labels", + expectedError: "Requested UPDATE of Control Plane MachineSet Not Allowed.", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "master", + }, + }, + updateMeta: ObjectMeta{ + Labels: nil, + }, + }, + { + name: "Non CP become CP", + expectedError: "Requested UPDATE of Control Plane MachineSet Not Allowed.", + originalMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "worker", + }, + }, + updateMeta: ObjectMeta{ + Labels: map[string]string{ + "machine.openshift.io/cluster-api-machine-role": "master", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gs := NewWithT(t) + + mgr, err := manager.New(cfg, manager.Options{ + MetricsBindAddress: "0", + Port: testEnv.WebhookInstallOptions.LocalServingPort, + CertDir: testEnv.WebhookInstallOptions.LocalServingCertDir, + }) + gs.Expect(err).ToNot(HaveOccurred()) + + machineSetCPUpdateValidator := NewMachineSetCPValidator() + gs.Expect(err).ToNot(HaveOccurred()) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-delete", &webhook.Admission{Handler: createMachineSetMockHandler(true)}) + mgr.GetWebhookServer().Register("/validate-machine-openshift-io-v1beta1-machineset-cp-update", &webhook.Admission{Handler: machineSetCPUpdateValidator}) + + done := make(chan struct{}) + stopped := make(chan struct{}) + go func() { + defer close(stopped) + gs.Expect(mgr.Start(done)).To(Succeed()) + }() + defer func() { + close(done) + <-stopped + }() + + gs.Eventually(func() (bool, error) { + resp, err := insecureHTTPClient.Get(fmt.Sprintf("https://127.0.0.1:%d", testEnv.WebhookInstallOptions.LocalServingPort)) + if err != nil { + return false, err + } + return resp.StatusCode == 404, nil + }).Should(BeTrue()) + + ms := &MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machineset-cp-update-", + Namespace: 
namespace.Name, + }, + Spec: MachineSetSpec{ + Template: MachineTemplateSpec{ + Spec: MachineSpec{}, + }, + }, + } + + ms.Spec.Template.ObjectMeta = tc.originalMeta + err = c.Create(ctx, ms) + gs.Expect(err).ToNot(HaveOccurred()) + defer func() { + gs.Expect(c.Delete(ctx, ms)).To(Succeed()) + }() + + ms.Spec.Template.ObjectMeta = tc.updateMeta + + err = c.Update(ctx, ms) + + if tc.expectedError != "" { + gs.Expect(err).ToNot(BeNil()) + gs.Expect(apierrors.ReasonForError(err)).To(BeEquivalentTo(tc.expectedError)) + } else { + gs.Expect(err).To(BeNil()) + } + }) + } +} + +type MachineSetMockHandler struct { + decoder *admission.Decoder + shouldAdmit bool +} + +func createMachineSetMockHandler(shouldAdmit bool) *MachineSetMockHandler { + return &MachineSetMockHandler{shouldAdmit: shouldAdmit} +} + +// InjectDecoder injects the decoder. +func (h *MachineSetMockHandler) InjectDecoder(d *admission.Decoder) error { + h.decoder = d + return nil +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *MachineSetMockHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + if h.shouldAdmit { + return admission.Allowed("OK") + } + return admission.Denied("Not OK") +} diff --git a/pkg/apis/machine/v1beta1/v1beta1_suite_test.go b/pkg/apis/machine/v1beta1/v1beta1_suite_test.go index fff26e5d4a..e8a8d3ad10 100644 --- a/pkg/apis/machine/v1beta1/v1beta1_suite_test.go +++ b/pkg/apis/machine/v1beta1/v1beta1_suite_test.go @@ -17,13 +17,17 @@ limitations under the License. 
package v1beta1 import ( + "context" + "crypto/tls" "log" + "net/http" "os" "path/filepath" "testing" "time" fuzz "github.com/google/gofuzz" + osconfigv1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" @@ -32,12 +36,29 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" ) -var cfg *rest.Config -var c client.Client +var ( + cfg *rest.Config + c client.Client + ctx = context.Background() + testEnv *envtest.Environment + insecureHTTPClient = http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } +) func TestMain(m *testing.M) { - t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "install")}, + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "..", "install"), + filepath.Join("..", "..", "..", "..", "vendor", "github.com", "openshift", "api", "config", "v1"), + }, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + DirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "install")}, + }, } err := SchemeBuilder.AddToScheme(scheme.Scheme) @@ -45,7 +66,12 @@ func TestMain(m *testing.M) { log.Fatal(err) } - if cfg, err = t.Start(); err != nil { + err = osconfigv1.AddToScheme(scheme.Scheme) + if err != nil { + log.Fatal(err) + } + + if cfg, err = testEnv.Start(); err != nil { log.Fatal(err) } @@ -54,7 +80,7 @@ func TestMain(m *testing.M) { } code := m.Run() - t.Stop() + testEnv.Stop() os.Exit(code) } diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 0000000000..ba49e3c234 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,5 @@ +root = true 
+ +[*] +indent_style = tab +indent_size = 4 diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 0000000000..4cd0cbaf43 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 0000000000..981d1bb813 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go + +go: + - 1.8.x + - 1.9.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +before_script: + - go get -u github.com/golang/lint/golint + +script: + - go test -v --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... + +os: + - linux + - osx + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 0000000000..5ab5d41c54 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. 
+ +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 0000000000..be4d7ea2c1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + 
+## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks 
@PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. 
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. 
+ * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched 
directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: 
https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 0000000000..828a60b24b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. 
+ +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). 
+ +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). 
+ +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 0000000000..f21e540800 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 0000000000..3993207413 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,79 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. 
+ +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. 
Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. 
+ +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 0000000000..ced39cb881 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 0000000000..190bf0de57 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." 
+func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 0000000000..d9fd1b88a0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
+func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
+ return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... 
+ for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. 
+func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 0000000000..cc7db4b22e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. 
+ events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. 
+ return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 0000000000..86e76a3d67 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. 
+ if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. 
+func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 0000000000..7d8de14513 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 0000000000..9139e17161 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 0000000000..09436f31d8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
+func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := 
syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. 
+func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if 
raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 93db4c0976..6001ae1c69 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -30,6 +30,8 @@ github.com/evanphx/json-patch github.com/exponent-io/jsonpath # github.com/fatih/color v1.7.0 github.com/fatih/color +# github.com/fsnotify/fsnotify v1.4.7 +github.com/fsnotify/fsnotify # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml # github.com/go-logr/logr v0.1.0 @@ -791,7 +793,7 
@@ sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1 sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 # sigs.k8s.io/cluster-api-provider-gcp v0.0.0-00010101000000-000000000000 => github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200528175251-4f2fdeb49fe1 sigs.k8s.io/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1 -# sigs.k8s.io/controller-runtime v0.6.0 +# sigs.k8s.io/controller-runtime v0.6.0 => github.com/mgugino-upstream-stage/controller-runtime v0.6.1-0.20200618201807-9d82bf2a7266 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/cache diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES index 4756d9bb4f..52b6673a2f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -7,6 +7,7 @@ aliases: - directxman12 - droot - mengqiy + - pwittrock # non-admin folks who have write-access and can approve any PRs in the repo controller-runtime-maintainers: @@ -36,5 +37,4 @@ aliases: # folks who may have context on ancient history, # but are no longer directly involved - controller-runtime-emeritus-maintainers: - - pwittrock + # controller-runtime-emeritus-maintainers: diff --git a/vendor/sigs.k8s.io/controller-runtime/go.mod b/vendor/sigs.k8s.io/controller-runtime/go.mod index 2c96d07548..8ef7b0285a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/go.mod +++ b/vendor/sigs.k8s.io/controller-runtime/go.mod @@ -4,6 +4,7 @@ go 1.13 require ( github.com/evanphx/json-patch v4.5.0+incompatible + github.com/fsnotify/fsnotify v1.4.7 github.com/go-logr/logr v0.1.0 github.com/go-logr/zapr v0.1.0 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect @@ -19,7 +20,6 @@ require ( 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gomodules.xyz/jsonpatch/v2 v2.0.1 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/fsnotify.v1 v1.4.7 k8s.io/api v0.18.2 k8s.io/apiextensions-apiserver v0.18.2 k8s.io/apimachinery v0.18.2 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index be688d5e89..e7d3c5c35b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -96,7 +96,11 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") } accessor.SetResourceVersion("1") - return t.ObjectTracker.Create(gvr, obj, ns) + if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { + accessor.SetResourceVersion("") + return err + } + return nil } func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { @@ -267,7 +271,7 @@ func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...cli } func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error { - gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) + gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go index 5260b6fdee..22a093cab0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -17,7 +17,11 @@ limitations under the License. 
package client import ( + "fmt" + jsonpatch "github.com/evanphx/json-patch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -59,8 +63,39 @@ func ConstantPatch(patchType types.PatchType, data []byte) Patch { return RawPatch(patchType, data) } +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. 
+ OptimisticLock bool +} + type mergeFromPatch struct { from runtime.Object + opts MergeFromOptions } // Type implements patch. @@ -80,12 +115,47 @@ func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { return nil, err } - return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return nil, err + } + + if s.opts.OptimisticLock { + dataMap := map[string]interface{}{} + if err := json.Unmarshal(data, &dataMap); err != nil { + return nil, err + } + fromMeta, ok := s.from.(metav1.Object) + if !ok { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q is not a valid metav1.Object", s.from) + } + resourceVersion := fromMeta.GetResourceVersion() + if len(resourceVersion) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, from object %q does not have any resource version we can use", s.from) + } + u := &unstructured.Unstructured{Object: dataMap} + u.SetResourceVersion(resourceVersion) + data, err = json.Marshal(u) + if err != nil { + return nil, err + } + } + + return data, nil } // MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. func MergeFrom(obj runtime.Object) Patch { - return &mergeFromPatch{obj} + return &mergeFromPatch{from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFromWithOptions(obj runtime.Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{from: obj, opts: *options} } // mergePatch uses a raw merge strategy to patch the object. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 7994669136..3dc526efc2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -102,12 +102,7 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller // Create controller with dependencies set c := &controller.Controller{ - Do: options.Reconciler, - Cache: mgr.GetCache(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(name), + Do: options.Reconciler, MakeQueue: func() workqueue.RateLimitingInterface { return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name) }, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index 27ce71d03d..9c8ec25768 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -237,8 +237,8 @@ func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { // MutateFn is a function which mutates the existing object into it's desired state. type MutateFn func() error -// AddFinalizer accepts a metav1 object and adds the provided finalizer if not present. -func AddFinalizer(o metav1.Object, finalizer string) { +// AddFinalizer accepts an Object and adds the provided finalizer if not present. +func AddFinalizer(o Object, finalizer string) { f := o.GetFinalizers() for _, e := range f { if e == finalizer { @@ -250,17 +250,19 @@ func AddFinalizer(o metav1.Object, finalizer string) { // AddFinalizerWithError tries to convert a runtime object to a metav1 object and add the provided finalizer. 
// It returns an error if the provided object cannot provide an accessor. +// +// Deprecated: Use AddFinalizer instead. Check is performing on compile time. func AddFinalizerWithError(o runtime.Object, finalizer string) error { m, err := meta.Accessor(o) if err != nil { return err } - AddFinalizer(m, finalizer) + AddFinalizer(m.(Object), finalizer) return nil } -// RemoveFinalizer accepts a metav1 object and removes the provided finalizer if present. -func RemoveFinalizer(o metav1.Object, finalizer string) { +// RemoveFinalizer accepts an Object and removes the provided finalizer if present. +func RemoveFinalizer(o Object, finalizer string) { f := o.GetFinalizers() for i := 0; i < len(f); i++ { if f[i] == finalizer { @@ -273,11 +275,31 @@ func RemoveFinalizer(o metav1.Object, finalizer string) { // RemoveFinalizerWithError tries to convert a runtime object to a metav1 object and remove the provided finalizer. // It returns an error if the provided object cannot provide an accessor. +// +// Deprecated: Use RemoveFinalizer instead. Check is performing on compile time. func RemoveFinalizerWithError(o runtime.Object, finalizer string) error { m, err := meta.Accessor(o) if err != nil { return err } - RemoveFinalizer(m, finalizer) + RemoveFinalizer(m.(Object), finalizer) return nil } + +// ContainsFinalizer checks an Object that the provided finalizer is present. +func ContainsFinalizer(o Object, finalizer string) bool { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// Object allows functions to work indistinctly with any resource that +// implements both Object interfaces. 
+type Object interface { + metav1.Object + runtime.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index 62a5843c52..1ca2d8eadc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -72,6 +72,15 @@ func defaultAssetPath(binary string) string { } +// ControlPlane is the re-exported ControlPlane type from the internal integration package +type ControlPlane = integration.ControlPlane + +// APIServer is the re-exported APIServer type from the internal integration package +type APIServer = integration.APIServer + +// Etcd is the re-exported Etcd type from the internal integration package +type Etcd = integration.Etcd + // Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and // install extension APIs type Environment struct { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 946ef82852..d482f8c27b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -21,14 +21,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -55,19 +50,6 @@ type Controller struct { // Defaults to the DefaultReconcileFunc. 
Do reconcile.Reconciler - // Client is a lazily initialized Client. The controllerManager will initialize this when Start is called. - Client client.Client - - // Scheme is injected by the controllerManager when controllerManager.Start is called - Scheme *runtime.Scheme - - // informers are injected by the controllerManager when controllerManager.Start is called - Cache cache.Cache - - // Config is the rest.Config used to talk to the apiserver. Defaults to one of in-cluster, environment variable - // specified, or the ~/.kube/Config. - Config *rest.Config - // MakeQueue constructs the queue for this controller once the controller is ready to start. // This exists because the standard Kubernetes workqueues start themselves immediately, which // leads to goroutine leaks if something calls controller.New repeatedly. @@ -86,17 +68,9 @@ type Controller struct { // JitterPeriod allows tests to reduce the JitterPeriod so they complete faster JitterPeriod time.Duration - // WaitForCacheSync allows tests to mock out the WaitForCacheSync function to return an error - // defaults to Cache.WaitForCacheSync - WaitForCacheSync func(stopCh <-chan struct{}) bool - // Started is true if the Controller has been Started Started bool - // Recorder is an event recorder for recording Event resources to the - // Kubernetes API. - Recorder record.EventRecorder - // TODO(community): Consider initializing a logger with the Controller Name as the tag // watches maintains a list of sources, handlers, and predicates to start when the controller is started. 
@@ -170,16 +144,18 @@ func (c *Controller) Start(stop <-chan struct{}) error { // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches log.Info("Starting Controller", "controller", c.Name) - // Wait for the caches to be synced before starting workers - if c.WaitForCacheSync == nil { - c.WaitForCacheSync = c.Cache.WaitForCacheSync - } - if ok := c.WaitForCacheSync(stop); !ok { - // This code is unreachable right now since WaitForCacheSync will never return an error - // Leaving it here because that could happen in the future - err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) - log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) - return err + for _, watch := range c.watches { + syncingSource, ok := watch.src.(source.SyncingSource) + if !ok { + continue + } + if err := syncingSource.WaitForSync(stop); err != nil { + // This code is unreachable in case of kube watches since WaitForCacheSync will never return an error + // Leaving it here because that could happen in the future + err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) + log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) + return err + } } if c.JitterPeriod == 0 { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/control_plane.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/control_plane.go index c0dbe02e57..bab0fb20e0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/control_plane.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/control_plane.go @@ -5,6 +5,7 @@ import ( "net/url" "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -42,17 +43,20 @@ func (f *ControlPlane) Start() error { // Stop will stop your control plane processes, and clean up their data. 
func (f *ControlPlane) Stop() error { + var errList []error + if f.APIServer != nil { if err := f.APIServer.Stop(); err != nil { - return err + errList = append(errList, err) } } if f.Etcd != nil { if err := f.Etcd.Stop(); err != nil { - return err + errList = append(errList, err) } } - return nil + + return utilerrors.NewAggregate(errList) } // APIURL returns the URL you should connect to to talk to your API. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 3e1467df50..4554298350 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -437,7 +437,6 @@ func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { func (cm *controllerManager) Start(stop <-chan struct{}) error { // join the passed-in stop channel as an upstream feeding into cm.internalStopper - defer close(cm.internalStopper) // initialize this here so that we reset the signal channel state on every start cm.errSignal = &errSignaler{errSignal: make(chan struct{})} @@ -454,11 +453,14 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { go cm.serveHealthProbes(cm.internalStop) } - go cm.startNonLeaderElectionRunnables() + doneCh := make(chan error, 1) + go cm.startNonLeaderElectionRunnables(doneCh) if cm.resourceLock != nil { err := cm.startLeaderElection() if err != nil { + close(cm.internalStopper) + <-doneCh return err } } else { @@ -469,20 +471,23 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { select { case <-stop: - // We are done + close(cm.internalStopper) + <-doneCh return nil case <-cm.errSignal.GotError(): - // Error starting a controller + close(cm.internalStopper) + <-doneCh return cm.errSignal.Error() } } -func (cm *controllerManager) startNonLeaderElectionRunnables() { +func (cm *controllerManager) startNonLeaderElectionRunnables(doneCh chan error) { 
 	cm.mu.Lock()
 	defer cm.mu.Unlock()

 	cm.waitForCache()
+	returnCh := make(chan error, 1)

 	// Start the non-leaderelection Runnables after the cache has synced
 	for _, c := range cm.nonLeaderElectionRunnables {
 		// Controllers block, but we want to return an error if any have an error starting.
@@ -495,8 +500,22 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() {
 			// we use %T here because we don't have a good stand-in for "name",
 			// and the full runnable might not serialize (mutexes, etc)
 			log.V(1).Info("non-leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl))
+			returnCh <- nil
 		}()
 	}
+
+	doneCount := 0
+
+	numRunners := len(cm.nonLeaderElectionRunnables)
+	for doneCount < numRunners {
+		// Block until a runnable reports completion; a select with a
+		// default branch here would busy-spin a CPU core while waiting.
+		<-returnCh
+		doneCount++
+	}
+
+	close(returnCh)
+	close(doneCh)
 }

 func (cm *controllerManager) startLeaderElectionRunnables() {
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
index 223c3773ab..1526cef476 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
@@ -343,8 +343,8 @@ func New(config *rest.Config, options Options) (Manager, error) {
 	}, nil
 }

-// defaultNewClient creates the default caching client
-func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) {
+// DefaultNewClient creates the default caching client
+func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) {
 	// Create the Client for Write operations.
c, err := client.New(config, options) if err != nil { @@ -389,7 +389,7 @@ func setOptionsDefaults(options Options) Options { // Allow newClient to be mocked if options.NewClient == nil { - options.NewClient = defaultNewClient + options.NewClient = DefaultNewClient } // Allow newCache to be mocked diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go index b47a91d042..9af45b93f6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -33,7 +33,7 @@ type Cache interface { } // CacheInto will set informers on i and return the result if it implements Cache. Returns -//// false if i does not implement Cache. +// false if i does not implement Cache. func CacheInto(c cache.Cache, i interface{}) (bool, error) { if s, ok := i.(Cache); ok { return true, s.InjectCache(c) @@ -62,7 +62,7 @@ type Config interface { } // ConfigInto will set config on i and return the result if it implements Config. Returns -//// false if i does not implement Config. +// false if i does not implement Config. func ConfigInto(config *rest.Config, i interface{}) (bool, error) { if s, ok := i.(Config); ok { return true, s.InjectConfig(config) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 32512f97ea..b2c6b2bbc3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -18,6 +18,7 @@ package source import ( "context" + "errors" "fmt" "sync" @@ -56,10 +57,17 @@ type Source interface { Start(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error } +// SyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. 
+type SyncingSource interface { + Source + WaitForSync(stop <-chan struct{}) error +} + // NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used // and not overwritten. It can be used to watch objects in a different cluster by passing the cache // from that other cluster -func NewKindWithCache(object runtime.Object, cache cache.Cache) Source { +func NewKindWithCache(object runtime.Object, cache cache.Cache) SyncingSource { return &kindWithCache{kind: Kind{Type: object, cache: cache}} } @@ -72,6 +80,10 @@ func (ks *kindWithCache) Start(handler handler.EventHandler, queue workqueue.Rat return ks.kind.Start(handler, queue, prct...) } +func (ks *kindWithCache) WaitForSync(stop <-chan struct{}) error { + return ks.kind.WaitForSync(stop) +} + // Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) type Kind struct { // Type is the type of object to watch. e.g. &v1.Pod{} @@ -81,7 +93,7 @@ type Kind struct { cache cache.Cache } -var _ Source = &Kind{} +var _ SyncingSource = &Kind{} // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. @@ -118,6 +130,16 @@ func (ks *Kind) String() string { return fmt.Sprintf("kind source: unknown GVK") } +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(stop <-chan struct{}) error { + if !ks.cache.WaitForCacheSync(stop) { + // Would be great to return something more informative here + return errors.New("cache did not sync") + } + return nil +} + var _ inject.Cache = &Kind{} // InjectCache is internal should be called only by the Controller. 
InjectCache is used to inject @@ -283,6 +305,8 @@ func (is *Informer) String() string { return fmt.Sprintf("informer source: %p", is.Informer) } +var _ Source = Func(nil) + // Func is a function that implements Source type Func func(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go index fbe757cc74..bd797fd738 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go @@ -20,7 +20,7 @@ import ( "crypto/tls" "sync" - "gopkg.in/fsnotify.v1" + "github.com/fsnotify/fsnotify" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go index bd19a3a8da..a29643b244 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go @@ -32,9 +32,30 @@ var ( }, []string{"webhook"}, ) + + // RequestTotal is a prometheus metric which is a counter of the total processed admission requests. + RequestTotal = func() *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "controller_runtime_webhook_requests_total", + Help: "Total number of admission requests by HTTP status code.", + }, + []string{"webhook", "code"}, + ) + }() + + // RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests. 
+ RequestInFlight = func() *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "controller_runtime_webhook_requests_in_flight", + Help: "Current number of admission requests being served.", + }, + []string{"webhook"}, + ) + }() ) func init() { - metrics.Registry.MustRegister( - RequestLatency) + metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index d542f2de44..e771a936cb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -28,8 +28,9 @@ import ( "path/filepath" "strconv" "sync" - "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" @@ -123,13 +124,23 @@ func (s *Server) Register(path string, hook http.Handler) { // instrumentedHook adds some instrumentation on top of the given webhook. func instrumentedHook(path string, hookRaw http.Handler) http.Handler { - return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { - startTS := time.Now() - defer func() { metrics.RequestLatency.WithLabelValues(path).Observe(time.Since(startTS).Seconds()) }() - hookRaw.ServeHTTP(resp, req) - - // TODO(directxman12): add back in metric about total requests broken down by result? - }) + lbl := prometheus.Labels{"webhook": path} + + lat := metrics.RequestLatency.MustCurryWith(lbl) + cnt := metrics.RequestTotal.MustCurryWith(lbl) + gge := metrics.RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. 
+ cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + + return promhttp.InstrumentHandlerDuration( + lat, + promhttp.InstrumentHandlerCounter( + cnt, + promhttp.InstrumentHandlerInFlight(gge, hookRaw), + ), + ) } // Start runs the server.