diff --git a/.github/data/matrix-smoke-plus.json b/.github/data/matrix-smoke-plus.json index 5e67ff5c5b..247ad5370a 100644 --- a/.github/data/matrix-smoke-plus.json +++ b/.github/data/matrix-smoke-plus.json @@ -64,19 +64,26 @@ "platforms": "linux/arm64, linux/amd64" }, { - "label": "policies 1/2", + "label": "policies 1/3", "image": "ubi-9-plus", "type": "plus", - "marker": "'policies and not policies_ac and not policies_jwt and not policies_mtls'", + "marker": "'policies and not policies_ac and not policies_jwt and not policies_mtls and not policies_rl'", "platforms": "linux/arm64, linux/amd64, linux/s390x" }, { - "label": "policies 2/2", + "label": "policies 2/3", "image": "ubi-9-plus", "type": "plus", "marker": "'policies_ac or policies_jwt or policies_mtls'", "platforms": "linux/arm64, linux/amd64, linux/s390x" }, + { + "label": "policies 3/3", + "image": "ubi-9-plus", + "type": "plus", + "marker": "policies_rl", + "platforms": "linux/arm64, linux/amd64, linux/s390x" + }, { "label": "OIDC-UI 1/1", "image": "debian-plus", diff --git a/examples/custom-resources/rate-limit-tiered-jwt-claim/basic-token.jwt b/examples/custom-resources/rate-limit-tiered-jwt-claim/basic-token.jwt index 64f232d0cf..2c47e76901 100644 --- a/examples/custom-resources/rate-limit-tiered-jwt-claim/basic-token.jwt +++ b/examples/custom-resources/rate-limit-tiered-jwt-claim/basic-token.jwt @@ -1 +1 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2RldGFpbHMiOnsibGV2ZWwiOiJCYXNpYyJ9LCJzdWIiOiJjbGllbnQyIiwibmFtZSI6IkphbmUgRG9lIn0.WeylllL0g70FQqtuz9HQh8oI7-1y9Qlx1_LVCZxAGLE \ No newline at end of file +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjAwMDEifQ.eyJ1c2VyX2RldGFpbHMiOnsibGV2ZWwiOiJCYXNpYyJ9LCJzdWIiOiJjbGllbnQyIiwibmFtZSI6IkphbmUgRG9lIn0.Iy187N0_DQF-uQwGzsnn4fOwADNAYotNWB8nSMQaD2M \ No newline at end of file diff --git a/examples/custom-resources/rate-limit-tiered-jwt-claim/default-token.jwt b/examples/custom-resources/rate-limit-tiered-jwt-claim/default-token.jwt index 9ff690334a..ed2c42097f 100644 --- a/examples/custom-resources/rate-limit-tiered-jwt-claim/default-token.jwt +++ b/examples/custom-resources/rate-limit-tiered-jwt-claim/default-token.jwt @@ -1 +1 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJjbGllbnQzIiwibmFtZSI6IkJpbGx5IEJsb2dncyJ9.ARozS58-7UN5enKlH1AXt_QC_tuRoLC1I-lTc0UrJFo \ No newline at end of file +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjAwMDEifQ.eyJzdWIiOiJjbGllbnQzIiwibmFtZSI6IkJpbGx5IEJsb2dncyJ9.i0CScSOYEaMCkrIA8HpvElfjrdDr4pp2HEkpgnc8UVc \ No newline at end of file diff --git a/examples/custom-resources/rate-limit-tiered-jwt-claim/premium-token.jwt b/examples/custom-resources/rate-limit-tiered-jwt-claim/premium-token.jwt index a51c61c7d4..448a2b2660 100644 --- a/examples/custom-resources/rate-limit-tiered-jwt-claim/premium-token.jwt +++ b/examples/custom-resources/rate-limit-tiered-jwt-claim/premium-token.jwt @@ -1 +1 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2RldGFpbHMiOnsibGV2ZWwiOiJQcmVtaXVtIn0sInN1YiI6ImNsaWVudDUiLCJuYW1lIjoiSm9obiBEb2UiLCJpYXQiOjE1MTYyMzkwMjJ9.LxYflVUa01jsoxo4TbC4h5pnqypBamV1XUqKdAGt968 \ No newline at end of file +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjAwMDEifQ.eyJ1c2VyX2RldGFpbHMiOnsibGV2ZWwiOiJQcmVtaXVtIn0sInN1YiI6ImNsaWVudDUiLCJuYW1lIjoiSm9obiBEb2UiLCJpYXQiOjE1MTYyMzkwMjJ9.v2s74eW_jIkR0VKKv0JmQZ6WwQSdDDDtUTWFjAwg82c \ No newline at end of file diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go index a5e920aa5f..5adfe35468 100644 --- a/internal/configs/virtualserver.go +++ 
b/internal/configs/virtualserver.go @@ -1048,7 +1048,7 @@ func (p *policiesCfg) addRateLimitConfig( ) *validationResults { res := newValidationResults() - rlZoneName := fmt.Sprintf("pol_rl_%v_%v_%v_%v", polNamespace, polName, ownerDetails.vsNamespace, ownerDetails.vsName) + rlZoneName := rfc1123ToSnake(fmt.Sprintf("pol_rl_%v_%v_%v_%v", polNamespace, polName, ownerDetails.vsNamespace, ownerDetails.vsName)) if rateLimit.Condition != nil && rateLimit.Condition.JWT.Claim != "" && rateLimit.Condition.JWT.Match != "" { lrz := generateGroupedLimitReqZone(rlZoneName, rateLimit, podReplicas, ownerDetails) p.RateLimit.PolicyGroupMaps = append(p.RateLimit.PolicyGroupMaps, *generateLRZPolicyGroupMap(lrz)) @@ -1778,7 +1778,7 @@ func generateGroupedLimitReqZone(zoneName string, strings.ToLower(rateLimitPol.Condition.JWT.Match), ) - lrz.GroupVariable = fmt.Sprintf("$rl_%s_%s_group_%s", + lrz.GroupVariable = rfc1123ToSnake(fmt.Sprintf("$rl_%s_%s_group_%s", ownerDetails.vsNamespace, ownerDetails.vsName, strings.ToLower( @@ -1786,8 +1786,8 @@ func generateGroupedLimitReqZone(zoneName string, strings.Split(rateLimitPol.Condition.JWT.Claim, "."), "_", ), ), - ) - lrz.Key = fmt.Sprintf("$%s", strings.Replace(zoneName, "-", "_", -1)) + )) + lrz.Key = rfc1123ToSnake(fmt.Sprintf("$%s", zoneName)) lrz.PolicyResult = rateLimitPol.Key lrz.GroupDefault = rateLimitPol.Condition.Default lrz.GroupSource = generateAuthJwtClaimSetVariable(rateLimitPol.Condition.JWT.Claim, ownerDetails.vsNamespace, ownerDetails.vsName) @@ -1858,7 +1858,7 @@ func generateAuthJwtClaimSet(jwtCondition conf_v1.JWTCondition, owner policyOwne } func generateAuthJwtClaimSetVariable(claim string, vsNamespace string, vsName string) string { - return fmt.Sprintf("$jwt_%v_%v_%v", vsNamespace, vsName, strings.Join(strings.Split(claim, "."), "_")) + return strings.ReplaceAll(fmt.Sprintf("$jwt_%v_%v_%v", vsNamespace, vsName, strings.Join(strings.Split(claim, "."), "_")), "-", "_") } func generateAuthJwtClaimSetClaim(claim string) string { diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go index c1ada72302..d8f9caf6d6 100644 --- a/internal/configs/virtualserver_test.go +++ b/internal/configs/virtualserver_test.go @@ -6570,7 +6570,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { LimitReqZones: []version2.LimitReqZone{ { Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", ZoneSize: "10M", Rate: "10r/s", PolicyResult: "$jwt_claim_sub", @@ -6581,7 +6581,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -6598,8 +6598,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { VSNamespace: "default", VSName: "cafe", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, 
Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -6805,7 +6805,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { LimitReqZones: []version2.LimitReqZone{ { Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", ZoneSize: "10M", Rate: "10r/s", PolicyResult: "$jwt_claim_sub", @@ -6816,7 +6816,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -6834,8 +6834,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { VSNamespace: "default", VSName: "cafe", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -7041,7 +7041,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { LimitReqZones: []version2.LimitReqZone{ { Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", ZoneSize: "10M", Rate: "10r/s", PolicyResult: "$jwt_claim_sub", @@ -7052,7 +7052,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -7081,8 +7081,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { ProxySetHeaders: []version2.Header{{Name: "Host", Value: "$host"}}, ServiceName: "tea-svc", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -7296,7 +7296,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { LimitReqZones: []version2.LimitReqZone{ { Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", ZoneSize: "10M", Rate: "10r/s", PolicyResult: "$jwt_claim_sub", @@ -7307,7 +7307,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: 
"pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -7350,8 +7350,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { VSRName: "tea", VSRNamespace: "default", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -7562,7 +7562,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { LimitReqZones: []version2.LimitReqZone{ { Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", ZoneSize: "10M", Rate: "10r/s", PolicyResult: "$jwt_claim_sub", @@ -7573,7 +7573,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -7591,8 +7591,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { VSNamespace: "default", VSName: "cafe", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -7625,8 +7625,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) { VSRName: "tea", VSRNamespace: "default", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -7857,7 +7857,7 @@ func TestGenerateVirtualServerConfigWithRateLimitGroupsWarning(t *testing.T) { }, { Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe", - ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", + ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", ZoneSize: "20M", Rate: "20r/s", PolicyResult: "$jwt_claim_sub", @@ -7874,8 +7874,8 @@ func TestGenerateVirtualServerConfigWithRateLimitGroupsWarning(t *testing.T) { VSNamespace: "default", VSName: "cafe", LimitReqs: []version2.LimitReq{ - {ZoneName: "pol_rl_default_premium-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, - {ZoneName: "pol_rl_default_basic-rate-limit-policy_default_cafe", Burst: 0, NoDelay: false, 
Delay: 0}, + {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, + {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0}, }, LimitReqOptions: version2.LimitReqOptions{ DryRun: false, @@ -8131,7 +8131,7 @@ func TestGeneratePolicies(t *testing.T) { RateLimit: rateLimit{ Reqs: []version2.LimitReq{ { - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, }, Zones: []version2.LimitReqZone{ @@ -8139,7 +8139,7 @@ func TestGeneratePolicies(t *testing.T) { Key: "test", ZoneSize: "10M", Rate: "10r/s", - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, }, Options: version2.LimitReqOptions{ @@ -8188,13 +8188,13 @@ func TestGeneratePolicies(t *testing.T) { Key: "test", ZoneSize: "10M", Rate: "10r/s", - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, { Key: "test2", ZoneSize: "20M", Rate: "20r/s", - ZoneName: "pol_rl_default_rateLimit-policy2_default_test", + ZoneName: "pol_rl_default_rateLimit_policy2_default_test", }, }, Options: version2.LimitReqOptions{ @@ -8203,10 +8203,10 @@ func TestGeneratePolicies(t *testing.T) { }, Reqs: []version2.LimitReq{ { - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, { - ZoneName: "pol_rl_default_rateLimit-policy2_default_test", + ZoneName: "pol_rl_default_rateLimit_policy2_default_test", }, }, }, @@ -8240,7 +8240,7 @@ func TestGeneratePolicies(t *testing.T) { Key: "test", ZoneSize: "10M", Rate: "5r/s", - ZoneName: "pol_rl_default_rateLimitScale-policy_default_test", + ZoneName: "pol_rl_default_rateLimitScale_policy_default_test", }, }, Options: version2.LimitReqOptions{ @@ -8249,7 +8249,7 @@ func TestGeneratePolicies(t *testing.T) { }, Reqs: []version2.LimitReq{ { - ZoneName: "pol_rl_default_rateLimitScale-policy_default_test", + ZoneName: "pol_rl_default_rateLimitScale_policy_default_test", }, }, }, @@ -8962,13 +8962,13 @@ func TestGeneratePoliciesFails(t *testing.T) { Key: "test", ZoneSize: "10M", Rate: "10r/s", - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, { Key: "test2", ZoneSize: "20M", Rate: "20r/s", - ZoneName: "pol_rl_default_rateLimit-policy2_default_test", + ZoneName: "pol_rl_default_rateLimit_policy2_default_test", }, }, Options: version2.LimitReqOptions{ @@ -8977,10 +8977,10 @@ func TestGeneratePoliciesFails(t *testing.T) { }, Reqs: []version2.LimitReq{ { - ZoneName: "pol_rl_default_rateLimit-policy_default_test", + ZoneName: "pol_rl_default_rateLimit_policy_default_test", }, { - ZoneName: "pol_rl_default_rateLimit-policy2_default_test", + ZoneName: "pol_rl_default_rateLimit_policy2_default_test", }, }, }, diff --git a/tests/Dockerfile b/tests/Dockerfile index 2e86c0726b..b78adc46c1 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,4 +1,4 @@ -# syntax=docker/dockerfile:1.5 +# syntax=docker/dockerfile:1.11 # this is here so we can grab the latest version of kind and have dependabot keep it up to date FROM kindest/node:v1.32.2@sha256:f226345927d7e348497136874b6d207e0b32cc52154ad8323129352923a3142f @@ -8,27 +8,23 @@ FROM quay.io/skopeo/stable:v1.17.0 FROM python:3.13@sha256:08471c63c5fdf2644adc142a7fa8d0290eb405cda14c473fbe5b4cd0933af601 RUN apt-get update \ - && apt-get install -y curl git \ + 
&& apt-get install -y curl git apache2-utils \ && rm -rf /var/lib/apt/lists/* WORKDIR /workspace/tests -COPY --link tests/requirements.txt /workspace/tests/ -RUN pip install --require-hashes -r requirements.txt --no-deps -RUN playwright install --with-deps chromium +COPY --link tests/requirements.txt /workspace/tests/ COPY --link deployments /workspace/deployments COPY --link config /workspace/config +COPY --link tests /workspace/tests +COPY --link pyproject.toml /workspace/pyproject.toml RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ - && apt-get update && apt-get install -y apache2-utils + && curl https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz | tar xvz -C /tmp/ && mv /tmp/docker/docker /usr/bin/docker -RUN apt update -y \ - && curl https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz | tar xvz -C /tmp/ && mv /tmp/docker/docker /usr/bin/docker - -COPY --link tests /workspace/tests - -COPY --link pyproject.toml /workspace/ +RUN pip install --require-hashes -r requirements.txt --no-deps +RUN playwright install --with-deps chromium ENTRYPOINT ["python3", "-m", "pytest"] diff --git a/tests/Makefile b/tests/Makefile index f5677ca326..eaba1c5b6e 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -10,6 +10,7 @@ KUBE_CONFIG_FOLDER = ${HOME}/.kube KIND_KUBE_CONFIG_FOLDER = $(KUBE_CONFIG_FOLDER)/kind MINIKUBE_KUBE_CONFIG_FOLDER = $(KUBE_CONFIG_FOLDER)/minikube DOCKERFILEPATH := ${ROOT_DIR}/tests/Dockerfile +PYTHON ?= python3 IP_FAMILY = dual IC_TYPE ?= nginx-ingress ## The Ingress Controller type to use, "nginx-ingress" or "nginx-plus-ingress". Defaults to "nginx-ingress" SHOW_IC_LOGS ?= no ## Should the tests show the Ingress Controller logs on failure, "yes" or "no". Defaults to "no" @@ -36,7 +37,7 @@ help: ## Show available make targets .PHONY: build build: ## Run build - docker build -t $(TEST_PREFIX):$(TEST_TAG) -f $(DOCKERFILEPATH) .. 
+ docker build -t $(TEST_PREFIX):$(TEST_TAG) -f $(DOCKERFILEPATH) $(ROOT_DIR) $(KUBE_CONFIG_FOLDER): @@ -51,9 +52,49 @@ $(MINIKUBE_KUBE_CONFIG_FOLDER): $(KUBE_CONFIG_FOLDER) @mkdir -p $@ +.PHONY: setup-venv +setup-venv: + $(PYTHON) -m venv $(ROOT_DIR)/tests/venv; + source $(ROOT_DIR)/tests/venv/bin/activate \ + && pip install --require-hashes -r requirements.txt --no-deps \ + && playwright install --with-deps chromium + + +.PHONY: clean-venv +clean-venv: + @rm -rf $(ROOT_DIR)/tests/venv + + +.PHONY: run-local-tests +run-local-tests: ## Run tests + source $(ROOT_DIR)/tests/venv/bin/activate \ + && pytest \ + --image=$(BUILD_IMAGE) \ + --image-pull-policy=$(PULL_POLICY) \ + --deployment-type=$(DEPLOYMENT_TYPE) \ + --ic-type=$(IC_TYPE) \ + --service=$(SERVICE) \ + --node-ip=$(NODE_IP) \ + --show-ic-logs=$(SHOW_IC_LOGS) \ + --plus-jwt=$(PLUS_JWT) \ + -sv \ + $(PYTEST_ARGS) + + .PHONY: run-tests run-tests: ## Run tests - docker run --rm -v $(KUBE_CONFIG_FOLDER):/root/.kube $(TEST_PREFIX):$(TEST_TAG) --context=$(CONTEXT) --image=$(BUILD_IMAGE) --image-pull-policy=$(PULL_POLICY) --deployment-type=$(DEPLOYMENT_TYPE) --ic-type=$(IC_TYPE) --service=$(SERVICE) --node-ip=$(NODE_IP) --show-ic-logs=$(SHOW_IC_LOGS) $(PYTEST_ARGS) + docker run --rm -v $(KUBE_CONFIG_FOLDER):/root/.kube \ + $(TEST_PREFIX):$(TEST_TAG) \ + --context=$(CONTEXT) \ + --image=$(BUILD_IMAGE) \ + --image-pull-policy=$(PULL_POLICY) \ + --deployment-type=$(DEPLOYMENT_TYPE) \ + --ic-type=$(IC_TYPE) \ + --service=$(SERVICE) \ + --node-ip=$(NODE_IP) \ + --show-ic-logs=$(SHOW_IC_LOGS) \ + --plus-jwt=$(PLUS_JWT) \ + $(PYTEST_ARGS) .PHONY: run-tests-in-kind diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-basic-no-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-basic-no-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..af63b5845b --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-basic-no-default-jwt-claim-sub.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-basic +spec: + rateLimit: + rate: 1r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Basic + claim: user_details.level diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-basic-with-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-basic-with-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..2f9e16b0b0 --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-basic-with-default-jwt-claim-sub.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-basic +spec: + rateLimit: + rate: 1r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Basic + claim: user_details.level + default: true diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-bronze-with-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-bronze-with-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..faebff7ab7 --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-bronze-with-default-jwt-claim-sub.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-bronze +spec: + rateLimit: + rate: 5r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Bronze + claim: user_details.tier + default: true diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-gold-no-default-jwt-claim-sub.yaml 
b/tests/data/rate-limit/policies/rate-limit-tiered-gold-no-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..f435da4667 --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-gold-no-default-jwt-claim-sub.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-gold +spec: + rateLimit: + rate: 15r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Gold + claim: user_details.tier diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-premium-no-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-premium-no-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..580bbc13ba --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-premium-no-default-jwt-claim-sub.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-premium +spec: + rateLimit: + rate: 5r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Premium + claim: user_details.level diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-premium-with-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-premium-with-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..61afa897b6 --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-premium-with-default-jwt-claim-sub.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-premium +spec: + rateLimit: + rate: 5r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Premium + claim: user_details.level + default: true diff --git a/tests/data/rate-limit/policies/rate-limit-tiered-silver-no-default-jwt-claim-sub.yaml b/tests/data/rate-limit/policies/rate-limit-tiered-silver-no-default-jwt-claim-sub.yaml new file mode 100644 index 0000000000..0ff16f7a30 --- /dev/null +++ b/tests/data/rate-limit/policies/rate-limit-tiered-silver-no-default-jwt-claim-sub.yaml @@ -0,0 +1,13 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-jwt-claim-sub-silver +spec: + rateLimit: + rate: 10r/s + key: ${jwt_claim_sub} + zoneSize: 10M + condition: + jwt: + match: Silver + claim: user_details.tier diff --git a/tests/data/rate-limit/route-subroute/virtual-server-route-jwt-claim-sub.yaml b/tests/data/rate-limit/route-subroute/virtual-server-route-jwt-claim-sub.yaml new file mode 100644 index 0000000000..4f2d20e7da --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-route-jwt-claim-sub.yaml @@ -0,0 +1,22 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: rate-limit-jwt-claim-sub + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/rate-limit/route-subroute/virtual-server-route-mutliple-tiered-jwt-claim-sub.yaml b/tests/data/rate-limit/route-subroute/virtual-server-route-mutliple-tiered-jwt-claim-sub.yaml new file mode 100644 index 0000000000..786ada0a5e --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-route-mutliple-tiered-jwt-claim-sub.yaml @@ -0,0 +1,27 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: 
virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 + policies: + - name: rate-limit-jwt-claim-sub-bronze + - name: rate-limit-jwt-claim-sub-silver + - name: rate-limit-jwt-claim-sub-gold diff --git a/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml b/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml new file mode 100644 index 0000000000..cc3dcd1dc5 --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml @@ -0,0 +1,23 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-bronze-silver-gold-jwt-claim-sub.yaml b/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-bronze-silver-gold-jwt-claim-sub.yaml new file mode 100644 index 0000000000..0699a7f3d6 --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-route-tiered-bronze-silver-gold-jwt-claim-sub.yaml @@ -0,0 +1,24 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: backends +spec: + host: virtual-server-route.example.com + upstreams: + - name: backend1 + service: backend1-svc + port: 80 + - name: backend3 + service: backend3-svc + port: 80 + subroutes: + - path: "/backends/backend1" + policies: + - name: rate-limit-jwt-claim-sub-bronze + - name: rate-limit-jwt-claim-sub-silver + - name: rate-limit-jwt-claim-sub-gold + action: + pass: backend1 + - path: "/backends/backend3" + action: + pass: backend3 diff --git a/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-route-override.yaml b/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-route-override.yaml new file mode 100644 index 0000000000..99b8784f65 --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-route-override.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server-route +spec: + host: virtual-server-route.example.com + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + routes: + - path: "/backends" + route: backends + - path: "/backend2" + route: backend2-namespace/backend2 diff --git a/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-spec-override.yaml b/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-spec-override.yaml new file mode 100644 index 0000000000..0b0778eafc --- /dev/null +++ b/tests/data/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-spec-override.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server-route +spec: + host: 
virtual-server-route.example.com + routes: + - path: "/backends" + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + route: backends + - path: "/backend2" + route: backend2-namespace/backend2 diff --git a/tests/data/rate-limit/spec/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml b/tests/data/rate-limit/spec/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml new file mode 100644 index 0000000000..4571a211f0 --- /dev/null +++ b/tests/data/rate-limit/spec/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml @@ -0,0 +1,23 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + host: virtual-server.example.com + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/data/rate-limit/spec/virtual-server-tiered-basic-premium-jwt-claim-sub.yaml b/tests/data/rate-limit/spec/virtual-server-tiered-basic-premium-jwt-claim-sub.yaml new file mode 100644 index 0000000000..e3006859db --- /dev/null +++ b/tests/data/rate-limit/spec/virtual-server-tiered-basic-premium-jwt-claim-sub.yaml @@ -0,0 +1,23 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: virtual-server +spec: + host: virtual-server.example.com + policies: + - name: rate-limit-jwt-claim-sub-basic + - name: rate-limit-jwt-claim-sub-premium + upstreams: + - name: backend2 + service: backend2-svc + port: 80 + - name: backend1 + service: backend1-svc + port: 80 + routes: + - path: "/backend1" + action: + pass: backend1 + - path: "/backend2" + action: + pass: backend2 diff --git a/tests/requirements.txt b/tests/requirements.txt index e8b5a8a3ee..3559397783 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -332,7 +332,9 @@ greenlet==3.1.1 \ --hash=sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803 \ --hash=sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79 \ --hash=sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f - # via playwright + # via + # -r requirements.in + # playwright grpcio==1.68.1 \ --hash=sha256:025f790c056815b3bf53da850dd70ebb849fd755a4b1ac822cb65cd631e37d43 \ --hash=sha256:04cfd68bf4f38f5bb959ee2361a7546916bd9a50f78617a346b3aeb2b42e2161 \ @@ -614,7 +616,13 @@ pycparser==2.22 \ pyee==12.0.0 \ --hash=sha256:7b14b74320600049ccc7d0e0b1becd3b4bd0a03c745758225e31a59f4095c990 \ --hash=sha256:c480603f4aa2927d4766eb41fa82793fe60a82cbfdb8d688e0d08c55a534e145 - # via playwright + # via + # -r requirements.in + # playwright +pyjwt==2.10.1 \ + --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ + --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb + # via -r requirements.in pyopenssl==24.3.0 \ --hash=sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36 \ --hash=sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a @@ -733,7 +741,9 @@ six==1.17.0 \ typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 - # via pyee + # via + # -r requirements.in + # pyee urllib3==2.2.3 \ 
--hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 diff --git a/tests/suite/test_rl_policies.py b/tests/suite/test_rl_policies.py index 2d8185ea58..c02d8e2e63 100644 --- a/tests/suite/test_rl_policies.py +++ b/tests/suite/test_rl_policies.py @@ -1,12 +1,21 @@ import time +import jwt import pytest import requests from settings import TEST_DATA from suite.utils.custom_resources_utils import read_custom_resource -from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy -from suite.utils.resources_utils import get_pod_list, get_vs_nginx_template_conf, scale_deployment, wait_before_test +from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, create_policy_from_yaml, delete_policy +from suite.utils.resources_utils import ( + get_pod_list, + get_vs_nginx_template_conf, + scale_deployment, + wait_before_test, + wait_for_event, +) from suite.utils.vs_vsr_resources_utils import ( + apply_and_assert_valid_vs, + apply_and_assert_warning_vs, create_virtual_server_from_yaml, delete_virtual_server, patch_virtual_server_from_yaml, @@ -26,7 +35,24 @@ rl_vs_override_spec_route = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-override-spec-route.yaml" rl_vs_jwt_claim_sub = f"{TEST_DATA}/rate-limit/spec/virtual-server-jwt-claim-sub.yaml" rl_pol_jwt_claim_sub = f"{TEST_DATA}/rate-limit/policies/rate-limit-jwt-claim-sub.yaml" -token = f"{TEST_DATA}/jwt-policy/token.jwt" +rl_vs_basic_premium_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/spec/virtual-server-tiered-basic-premium-jwt-claim-sub.yaml" +) +rl_vs_route_basic_premium_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/spec/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml" +) +rl_pol_basic_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-basic-no-default-jwt-claim-sub.yaml" +) +rl_pol_premium_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-premium-no-default-jwt-claim-sub.yaml" +) +rl_pol_basic_with_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-basic-with-default-jwt-claim-sub.yaml" +) +rl_pol_premium_with_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-premium-with-default-jwt-claim-sub.yaml" +) @pytest.mark.policies @@ -59,6 +85,34 @@ def restore_default_vs(self, kube_apis, virtual_server_setup) -> None: create_virtual_server_from_yaml(kube_apis.custom_objects, std_vs_src, virtual_server_setup.namespace) wait_before_test() + def check_rate_limit_eq(self, url, code, counter, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + assert occur.count(code) in range(counter, counter + 2) + + def check_rate_limit_nearly_eq(self, url, code, counter, plus_minus=1, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + lower_range = counter + if counter > 1: + lower_range = counter - plus_minus + upper_range = counter + plus_minus + 1 # add an extra 1 to account for range + assert occur.count(code) in range(lower_range, upper_range) + @pytest.mark.smoke @pytest.mark.parametrize("src", [rl_vs_pri_src]) def test_rl_policy_1rs( @@ 
-72,40 +126,26 @@ def test_rl_policy_1rs( """ Test if rate-limiting policy is working with 1 rps """ - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_src, test_namespace) - print(f"Patch vs with policy: {src}") - patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_pri_src) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - policy_info = read_custom_resource(kube_apis.custom_objects, test_namespace, "policies", pol_name) - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + # Run rate limit test 1r/s + self.check_rate_limit_nearly_eq( virtual_server_setup.backend_1_url, + 200, + 1, headers={"host": virtual_server_setup.vs_host}, ) - print(resp.status_code) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) self.restore_default_vs(kube_apis, virtual_server_setup) - assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" - ) - assert occur.count(200) <= 1 @pytest.mark.parametrize("src", [rl_vs_sec_src]) def test_rl_policy_5rs( @@ -119,40 +159,26 @@ def test_rl_policy_5rs( """ Test if rate-limiting policy is working with 5 rps """ - rate_sec = 5 - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_sec_src, test_namespace) - print(f"Patch vs with policy: {src}") - patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_sec_src) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - policy_info = read_custom_resource(kube_apis.custom_objects, test_namespace, "policies", pol_name) - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + # Run rate limit test 5r/s + self.check_rate_limit_nearly_eq( virtual_server_setup.backend_1_url, + 200, + 5, headers={"host": virtual_server_setup.vs_host}, ) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) self.restore_default_vs(kube_apis, virtual_server_setup) - assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" - ) - assert rate_sec >= occur.count(200) >= (rate_sec - 2) @pytest.mark.parametrize("src", [rl_vs_invalid]) def test_rl_policy_invalid( @@ -240,34 +266,28 @@ def test_rl_override( """ List multiple policies in vs and test if the one with less rps is used """ - print(f"Create rl policy") - pol_name_pri = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_src, test_namespace) - pol_name_sec = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_sec_src, test_namespace) - print(f"Patch vs with policy: {src}") - 
patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name_pri = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_pri_src) + pol_name_sec = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_sec_src) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + + # Run rate limit test 1r/s + self.check_rate_limit_nearly_eq( virtual_server_setup.backend_1_url, + 200, + 1, headers={"host": virtual_server_setup.vs_host}, ) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name_pri, test_namespace) delete_policy(kube_apis.custom_objects, pol_name_sec, test_namespace) self.restore_default_vs(kube_apis, virtual_server_setup) - assert occur.count(200) <= 1 @pytest.mark.parametrize("src", [rl_vs_override_spec_route]) def test_rl_override_spec_route( @@ -283,35 +303,28 @@ def test_rl_override_spec_route( route:policy = secondary (5 rps) spec:policy = primary (1 rps) """ - rate_sec = 5 - print(f"Create rl policy") - pol_name_pri = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_src, test_namespace) - pol_name_sec = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_sec_src, test_namespace) - print(f"Patch vs with policy: {src}") - patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name_pri = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_pri_src) + pol_name_sec = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_sec_src) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + + # Run rate limit test 5r/s + self.check_rate_limit_nearly_eq( virtual_server_setup.backend_1_url, + 200, + 5, headers={"host": virtual_server_setup.vs_host}, ) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name_pri, test_namespace) delete_policy(kube_apis.custom_objects, pol_name_sec, test_namespace) self.restore_default_vs(kube_apis, virtual_server_setup) - assert rate_sec >= occur.count(200) >= (rate_sec - 2) @pytest.mark.parametrize("src", [rl_vs_pri_sca_src]) def test_rl_policy_scaled( @@ -329,18 +342,16 @@ def test_rl_policy_scaled( ns = ingress_controller_prerequisites.namespace scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 4) - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_sca_src, test_namespace) - print(f"Patch vs with policy: {src}") - patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_pri_sca_src) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - policy_info = 
read_custom_resource(kube_apis.custom_objects, test_namespace, "policies", pol_name) ic_pods = get_pod_list(kube_apis.v1, ns) for i in range(len(ic_pods)): conf = get_vs_nginx_template_conf( @@ -355,11 +366,6 @@ def test_rl_policy_scaled( scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1) delete_policy(kube_apis.custom_objects, pol_name, test_namespace) self.restore_default_vs(kube_apis, virtual_server_setup) - assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" - ) @pytest.mark.skip_for_nginx_oss @pytest.mark.parametrize("src", [rl_vs_jwt_claim_sub]) @@ -374,39 +380,426 @@ def test_rl_policy_jwt_claim_sub( ): """ Test if rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key + Policy is applied at the VirtualServer Spec level """ - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_jwt_claim_sub, test_namespace) - print(f"Patch vs with policy: {src}") - patch_virtual_server_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_jwt_claim_sub) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, virtual_server_setup.vs_name, src, - virtual_server_setup.namespace, ) - wait_before_test() - policy_info = read_custom_resource(kube_apis.custom_objects, test_namespace, "policies", pol_name) - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + jwt_token = jwt.encode( + {"sub": "client1"}, + "nginx", + algorithm="HS256", + ) + + self.check_rate_limit_nearly_eq( virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {token}"}, + 200, + 1, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {jwt_token}"}, + ) + wait_before_test(1) + + delete_policy(kube_apis.custom_objects, pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) + + +@pytest.mark.policies +@pytest.mark.policies_rl +@pytest.mark.parametrize( + "crd_ingress_controller, virtual_server_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + f"-enable-custom-resources", + f"-enable-leader-election=false", + ], + }, + { + "example": "rate-limit", + "app_type": "simple", + }, ) - print(resp.status_code) + ], + indirect=True, +) +class TestTieredRateLimitingPolicies: + def restore_default_vs(self, kube_apis, virtual_server_setup) -> None: + """ + Restore VirtualServer without policy spec + """ + delete_virtual_server(kube_apis.custom_objects, virtual_server_setup.vs_name, virtual_server_setup.namespace) + create_virtual_server_from_yaml(kube_apis.custom_objects, std_vs_src, virtual_server_setup.namespace) wait_before_test() - assert resp.status_code == 200 + + def check_rate_limit_eq(self, url, code, counter, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 while time.perf_counter() < t_end: resp = requests.get( - virtual_server_setup.backend_1_url, - headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {token}"}, + url, + headers=headers, ) occur.append(resp.status_code) - delete_policy(kube_apis.custom_objects, pol_name, test_namespace) + wait_before_test(delay) + assert occur.count(code) in range(counter, counter + 2) + + def check_rate_limit_nearly_eq(self, url, code, counter, plus_minus=1, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while 
time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + lower_range = counter + if counter > 1: + lower_range = counter - plus_minus + upper_range = counter + plus_minus + 1 # add an extra 1 to account for range + assert occur.count(code) in range(lower_range, upper_range) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vs_basic_premium_jwt_claim_sub]) + def test_speclevel_rl_policy_tiered_basic_premium_no_default_jwt_claim_sub( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key & + if the default is unlimited when no default policy is applied. + Policies are applied at the VirtualServer Spec level + """ + basic_pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_basic_no_default_jwt_claim_sub) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, + virtual_server_setup.vs_name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + ##  Test Basic Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 1, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 5, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit unlimited + self.check_rate_limit_eq( + virtual_server_setup.backend_1_url, 503, 0, headers={"host": virtual_server_setup.vs_host} + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, test_namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vs_basic_premium_jwt_claim_sub]) + def test_speclevel_rl_policy_tiered_basic_premium_with_default_jwt_claim_sub( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key & + if the default basic rate limit of 1r/s is applied. 
+ Policies are applied at the VirtualServer Spec level + """ + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, + virtual_server_setup.vs_name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + ##  Test Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 1, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 5, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, 200, 1, headers={"host": virtual_server_setup.vs_host} + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, test_namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vs_route_basic_premium_jwt_claim_sub]) + def test_routelevel_rl_policy_tiered_basic_premium_with_default_jwt_claim_sub( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key, + if the default basic rate limit of 1r/s is applied & + if a route without policies is unlimited. 
+ Policies are applied at the VirtualServer Route level + """ + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, + virtual_server_setup.vs_name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + ##  Test Basic Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 1, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 5, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, 200, 1, headers={"host": virtual_server_setup.vs_host} + ) + wait_before_test(1) + + ##  Test different backend route + self.check_rate_limit_eq( + virtual_server_setup.backend_2_url, 503, 0, headers={"host": virtual_server_setup.vs_host} + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, test_namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vs_route_basic_premium_jwt_claim_sub]) + def test_routelevel_rl_policy_tiered_basic_premium_no_default_jwt_claim_sub( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key, + if the default is unlimited when no default policy is applied & + if a route without policies is unlimited. 
+ Policies are applied at the VirtualServer Route level + """ + basic_pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_basic_no_default_jwt_claim_sub) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + # Patch VirtualServer + apply_and_assert_valid_vs( + kube_apis, + virtual_server_setup.namespace, + virtual_server_setup.vs_name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 1, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + virtual_server_setup.backend_1_url, + 200, + 5, + headers={"host": virtual_server_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit unlimited + self.check_rate_limit_eq( + virtual_server_setup.backend_1_url, 503, 0, headers={"host": virtual_server_setup.vs_host} + ) + wait_before_test(1) + + ##  Test different backend route + self.check_rate_limit_eq( + virtual_server_setup.backend_2_url, 503, 0, headers={"host": virtual_server_setup.vs_host} + ) + wait_before_test(1) + + delete_policy(kube_apis.custom_objects, basic_pol_name, test_namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vs_route_basic_premium_jwt_claim_sub]) + def test_rl_duplicate_default_policy_tiered_basic_premium_with_default_jwt_claim_sub( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + virtual_server_setup, + test_namespace, + src, + ): + """ + Test if, when both a basic and premium rate-limiting policy are the default for the tier, + the VS goes into an Invalid state and emits a Warning Event.
+ Policies are applied at the VirtualServer Route level + """ + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, test_namespace, rl_pol_premium_with_default_jwt_claim_sub + ) + + # Patch VirtualServer + apply_and_assert_warning_vs( + kube_apis, + virtual_server_setup.namespace, + virtual_server_setup.vs_name, + src, + ) + + # Assert that the 'AddedOrUpdatedWithWarning' event is present assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" + wait_for_event( + kube_apis.v1, + f"Tiered rate-limit Policies on [{virtual_server_setup.namespace}/{virtual_server_setup.vs_name}] contain conflicting default values", + virtual_server_setup.namespace, + 30, + ) + is True ) - assert occur.count(200) <= 1 + + delete_policy(kube_apis.custom_objects, basic_pol_name, test_namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, test_namespace) + self.restore_default_vs(kube_apis, virtual_server_setup) diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py index a31bff2281..3aabb9f1b1 100644 --- a/tests/suite/test_rl_policies_vsr.py +++ b/tests/suite/test_rl_policies_vsr.py @@ -1,15 +1,19 @@ import time +import jwt import pytest import requests from settings import TEST_DATA from suite.utils.custom_resources_utils import read_custom_resource -from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy -from suite.utils.resources_utils import get_pod_list, scale_deployment, wait_before_test +from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, create_policy_from_yaml, delete_policy +from suite.utils.resources_utils import get_pod_list, scale_deployment, wait_before_test, wait_for_event from suite.utils.vs_vsr_resources_utils import ( + apply_and_assert_valid_vs, + apply_and_assert_valid_vsr, + apply_and_assert_warning_vsr, + delete_and_create_v_s_route_from_yaml, + delete_and_create_vs_from_yaml, get_vs_nginx_template_conf, - patch_v_s_route_from_yaml, - patch_virtual_server_from_yaml, ) std_vs_src = f"{TEST_DATA}/virtual-server-route/standard/virtual-server.yaml" @@ -24,6 +28,44 @@ rl_vsr_override_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-override-subroute.yaml" rl_vsr_override_vs_spec_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-spec-override.yaml" rl_vsr_override_vs_route_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-route-override.yaml" +rl_vsr_override_tiered_basic_premium_vs_spec_src = ( + f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-spec-override.yaml" +) +rl_vsr_override_tiered_basic_premium_vs_route_src = ( + f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-tiered-basic-premium-route-override.yaml" +) +rl_vsr_jwt_claim_sub_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-jwt-claim-sub.yaml" +rl_pol_jwt_claim_sub_src = f"{TEST_DATA}/rate-limit/policies/rate-limit-jwt-claim-sub.yaml" +rl_vsr_basic_premium_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-tiered-basic-premium-jwt-claim-sub.yaml" +) +rl_vsr_bronze_silver_gold_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-tiered-bronze-silver-gold-jwt-claim-sub.yaml" +) +rl_vsr_multiple_tiered_jwt_claim_sub = ( + 
f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-mutliple-tiered-jwt-claim-sub.yaml" +) +rl_pol_basic_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-basic-no-default-jwt-claim-sub.yaml" +) +rl_pol_premium_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-premium-no-default-jwt-claim-sub.yaml" +) +rl_pol_basic_with_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-basic-with-default-jwt-claim-sub.yaml" +) +rl_pol_premium_with_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-premium-with-default-jwt-claim-sub.yaml" +) +rl_pol_bronze_with_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-bronze-with-default-jwt-claim-sub.yaml" +) +rl_pol_silver_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-silver-no-default-jwt-claim-sub.yaml" +) +rl_pol_gold_no_default_jwt_claim_sub = ( + f"{TEST_DATA}/rate-limit/policies/rate-limit-tiered-gold-no-default-jwt-claim-sub.yaml" +) @pytest.mark.policies @@ -50,7 +92,7 @@ def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None: Function to revert vsr deployments to valid state """ patch_src_m = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml" - patch_v_s_route_from_yaml( + delete_and_create_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, patch_src_m, @@ -58,6 +100,34 @@ def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None: ) wait_before_test() + def check_rate_limit_eq(self, url, code, counter, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + assert occur.count(code) in range(counter, counter + 2) + + def check_rate_limit_nearly_eq(self, url, code, counter, plus_minus=1, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + lower_range = counter + if counter > 1: + lower_range = counter - plus_minus + upper_range = counter + plus_minus + 1 # add an extra 1 to account for range + assert occur.count(code) in range(lower_range, upper_range) + @pytest.mark.smoke @pytest.mark.parametrize("src", [rl_vsr_pri_src]) def test_rl_policy_1rs_vsr( @@ -74,42 +144,24 @@ def test_rl_policy_1rs_vsr( """ req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace) - print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_pri_src) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, v_s_route_setup.route_m.name, src, - v_s_route_setup.route_m.namespace, ) - wait_before_test() - policy_info = read_custom_resource( - kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name - ) - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + self.check_rate_limit_nearly_eq( f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, headers={"host": v_s_route_setup.vs_host}, ) - print(resp.status_code) - assert 
resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - f"{req_url}{v_s_route_setup.route_m.paths[0]}", - headers={"host": v_s_route_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) - assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" - ) - assert occur.count(200) <= 1 @pytest.mark.parametrize("src", [rl_vsr_sec_src]) def test_rl_policy_5rs_vsr( @@ -124,44 +176,24 @@ def test_rl_policy_5rs_vsr( """ Test if rate-limiting policy is working with ~5 rps in vsr:subroute """ - rate_sec = 5 req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" - print(f"Create rl policy") - pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace) - print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_sec_src) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, v_s_route_setup.route_m.name, src, - v_s_route_setup.route_m.namespace, ) - wait_before_test() - policy_info = read_custom_resource( - kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name - ) - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + self.check_rate_limit_nearly_eq( f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, headers={"host": v_s_route_setup.vs_host}, ) - print(resp.status_code) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - f"{req_url}{v_s_route_setup.route_m.paths[0]}", - headers={"host": v_s_route_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) - self.restore_default_vsr(kube_apis, v_s_route_setup) - assert ( - policy_info["status"] - and policy_info["status"]["reason"] == "AddedOrUpdated" - and policy_info["status"]["state"] == "Valid" - ) - assert rate_sec >= occur.count(200) >= (rate_sec - 2) @pytest.mark.parametrize("src", [rl_vsr_override_src]) def test_rl_policy_override_vsr( @@ -179,40 +211,26 @@ def test_rl_policy_override_vsr( """ req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" - print(f"Create rl policy: 1rps") - pol_name_pri = create_policy_from_yaml( - kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace - ) - print(f"Create rl policy: 5rps") - pol_name_sec = create_policy_from_yaml( - kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace - ) - print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( - kube_apis.custom_objects, + pol_name_pri = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_pri_src) + pol_name_sec = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_sec_src) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, v_s_route_setup.route_m.name, src, - v_s_route_setup.route_m.namespace, ) - wait_before_test() - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + + self.check_rate_limit_nearly_eq( f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, headers={"host": 
v_s_route_setup.vs_host}, ) - print(resp.status_code) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - f"{req_url}{v_s_route_setup.route_m.paths[0]}", - headers={"host": v_s_route_setup.vs_host}, - ) - occur.append(resp.status_code) + delete_policy(kube_apis.custom_objects, pol_name_pri, v_s_route_setup.route_m.namespace) delete_policy(kube_apis.custom_objects, pol_name_sec, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) - assert occur.count(200) <= 1 @pytest.mark.parametrize("src", [rl_vsr_pri_src]) def test_rl_policy_deleted_vsr( @@ -231,7 +249,7 @@ def test_rl_policy_deleted_vsr( print(f"Create rl policy") pol_name = create_policy_from_yaml(kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace) print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( + delete_and_create_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, src, @@ -271,7 +289,7 @@ def test_rl_policy_invalid_vsr( kube_apis.custom_objects, rl_pol_invalid_src, v_s_route_setup.route_m.namespace ) print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( + delete_and_create_v_s_route_from_yaml( kube_apis.custom_objects, v_s_route_setup.route_m.name, src, @@ -313,54 +331,40 @@ def test_override_vs_vsr( Test if vsr subroute policy overrides vs spec policy And vsr subroute policy overrides vs route policy """ - rate_sec = 5 req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" # policy for virtualserver - print(f"Create rl policy: 1rps") - pol_name_vs = create_policy_from_yaml( - kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace - ) - # policy for virtualserverroute - print(f"Create rl policy: 5rps") - pol_name_vsr = create_policy_from_yaml( - kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace - ) + pol_name_pri = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_pri_src) + pol_name_sec = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_sec_src) # patch vsr with 5rps policy - patch_v_s_route_from_yaml( - kube_apis.custom_objects, + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, v_s_route_setup.route_m.name, rl_vsr_sec_src, - v_s_route_setup.route_m.namespace, ) + # patch vs with 1rps policy - patch_virtual_server_from_yaml( - kube_apis.custom_objects, v_s_route_setup.vs_name, src, v_s_route_setup.namespace + apply_and_assert_valid_vs( + kube_apis, + v_s_route_setup.namespace, + v_s_route_setup.vs_name, + src, ) - wait_before_test() - occur = [] - t_end = time.perf_counter() + 1 - resp = requests.get( + + self.check_rate_limit_nearly_eq( f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, headers={"host": v_s_route_setup.vs_host}, ) - print(resp.status_code) - assert resp.status_code == 200 - while time.perf_counter() < t_end: - resp = requests.get( - f"{req_url}{v_s_route_setup.route_m.paths[0]}", - headers={"host": v_s_route_setup.vs_host}, - ) - occur.append(resp.status_code) - - delete_policy(kube_apis.custom_objects, pol_name_vs, v_s_route_setup.route_m.namespace) - delete_policy(kube_apis.custom_objects, pol_name_vsr, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, pol_name_pri, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, pol_name_sec, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, 
v_s_route_setup) - patch_virtual_server_from_yaml( + delete_and_create_vs_from_yaml( kube_apis.custom_objects, v_s_route_setup.vs_name, std_vs_src, v_s_route_setup.namespace ) - assert rate_sec >= occur.count(200) >= (rate_sec - 2) @pytest.mark.parametrize("src", [rl_vsr_pri_sca_src]) def test_rl_policy_scaled_vsr( @@ -380,21 +384,13 @@ def test_rl_policy_scaled_vsr( ns = ingress_controller_prerequisites.namespace scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 4) - print(f"Create rl policy") - pol_name = create_policy_from_yaml( - kube_apis.custom_objects, rl_pol_pri_sca_src, v_s_route_setup.route_m.namespace - ) - print(f"Patch vsr with policy: {src}") - patch_v_s_route_from_yaml( - kube_apis.custom_objects, + pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_pri_sca_src) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, v_s_route_setup.route_m.name, src, - v_s_route_setup.route_m.namespace, - ) - - wait_before_test() - policy_info = read_custom_resource( - kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name ) ic_pods = get_pod_list(kube_apis.v1, ns) @@ -405,14 +401,603 @@ def test_rl_policy_scaled_vsr( v_s_route_setup.vs_name, ic_pods[i].metadata.name, ingress_controller_prerequisites.namespace, + print_log=False, ) assert "rate=10r/s" in conf # restore replicas, policy and vsr scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1) delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) self.restore_default_vsr(kube_apis, v_s_route_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vsr_jwt_claim_sub_src]) + def test_rl_policy_jwt_claim_sub_vsr( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + src, + ): + """ + Test if rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key in vsr:subroute + """ + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_jwt_claim_sub_src) + + print(f"Patch vsr with policy: {src}") + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, + v_s_route_setup.route_m.name, + src, + ) + + jwt_token = jwt.encode( + {"sub": "client1"}, + "nginx", + algorithm="HS256", + ) + + ##  Test Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {jwt_token}"}, + ) + + delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace) + self.restore_default_vsr(kube_apis, v_s_route_setup) + + +@pytest.mark.policies +@pytest.mark.policies_rl +@pytest.mark.parametrize( + "crd_ingress_controller, v_s_route_setup", + [ + ( + { + "type": "complete", + "extra_args": [ + f"-enable-custom-resources", + f"-enable-leader-election=false", + ], + }, + {"example": "virtual-server-route"}, + ) + ], + indirect=True, +) +class TestTieredRateLimitingPoliciesVsr: + def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None: + """ + Function to revert vsr deployments to valid state + """ + patch_src_m = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml" + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, + 
v_s_route_setup.route_m.name, + patch_src_m, + v_s_route_setup.route_m.namespace, + ) + wait_before_test() + + def check_rate_limit_eq(self, url, code, counter, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + assert occur.count(code) in range(counter, counter + 2) + + def check_rate_limit_nearly_eq(self, url, code, counter, plus_minus=1, delay=0.01, headers={}): + occur = [] + t_end = time.perf_counter() + 1 + while time.perf_counter() < t_end: + resp = requests.get( + url, + headers=headers, + ) + occur.append(resp.status_code) + wait_before_test(delay) + lower_range = counter + if counter > 1: + lower_range = counter - plus_minus + upper_range = counter + plus_minus + 1 # add an extra 1 to account for range + assert occur.count(code) in range(lower_range, upper_range) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vsr_basic_premium_jwt_claim_sub]) + def test_rl_policy_tiered_basic_premium_no_default_jwt_claim_sub_vsr( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key & + if the default is unlimited when no default policy is applied. + Policies are applied at the VirtualServerRoute level + """ + + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_basic_no_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, + v_s_route_setup.route_m.name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + + ##  Test Basic Rate Limit 1r/s+ + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit unlimited + self.check_rate_limit_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 503, + 0, + headers={"host": v_s_route_setup.vs_host}, + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, v_s_route_setup.route_m.namespace) + self.restore_default_vsr(kube_apis, v_s_route_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vsr_basic_premium_jwt_claim_sub]) + def test_rl_policy_tiered_basic_premium_with_default_jwt_claim_sub_vsr( + self, + kube_apis, + 
ingress_controller_prerequisites, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + src, + ): + """ + Test if basic rate-limiting policy is working with 1 rps using $jwt_claim_sub as the rate limit key, + if premium rate-limiting policy is working with 5 rps using $jwt_claim_sub as the rate limit key & + if the default basic rate limit of 1r/s is applied. + Policies are applied at the VirtualServerRoute level + """ + + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, + v_s_route_setup.route_m.name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + + ##  Test Basic Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host}, + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, v_s_route_setup.route_m.namespace) + self.restore_default_vsr(kube_apis, v_s_route_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize("src", [rl_vsr_multiple_tiered_jwt_claim_sub]) + def test_rl_policy_multiple_tiered_jwt_claim_sub_vsr( + self, + kube_apis, + ingress_controller_prerequisites, + crd_ingress_controller, + v_s_route_app_setup, + v_s_route_setup, + test_namespace, + src, + ): + """ + Test applying a basic/premium tier to /backend1 &, + applying a bronze/silver/gold tier to /backend3. 
+ Policies are applied at the VirtualServerRoute level + """ + + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + bronze_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_bronze_with_default_jwt_claim_sub + ) + silver_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_silver_no_default_jwt_claim_sub + ) + gold_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_gold_no_default_jwt_claim_sub + ) + + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, + v_s_route_setup.route_m.name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + bronze_jwt_token = jwt.encode( + {"user_details": {"tier": "Bronze"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + silver_jwt_token = jwt.encode( + {"user_details": {"tier": "Silver"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + gold_jwt_token = jwt.encode( + {"user_details": {"tier": "Gold"}, "sub": "client3"}, + "nginx", + algorithm="HS256", + ) + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + + ##  Test Basic Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Basic Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host}, + ) + wait_before_test(1) + + ##  Test Bronze Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {bronze_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Silver Rate Limit 10r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 10, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {silver_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Gold Rate Limit 15r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 15, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {gold_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Bronze Default Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host}, + ) + + delete_policy(kube_apis.custom_objects, basic_pol_name, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, premium_pol_name, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, bronze_pol_name, 
v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, silver_pol_name, v_s_route_setup.route_m.namespace) + delete_policy(kube_apis.custom_objects, gold_pol_name, v_s_route_setup.route_m.namespace) + self.restore_default_vsr(kube_apis, v_s_route_setup) + + @pytest.mark.skip_for_nginx_oss + @pytest.mark.parametrize( + "src", [rl_vsr_override_tiered_basic_premium_vs_spec_src, rl_vsr_override_tiered_basic_premium_vs_route_src] + ) + def test_override_multiple_tiered_jwt_claim_sub_vs_vsr( + self, + kube_apis, + crd_ingress_controller, + v_s_route_app_setup, + test_namespace, + v_s_route_setup, + src, + ): + """ + Test if vsr subroute policy overrides vs spec policy + And vsr subroute policy overrides vs route policy + """ + + # policies for virtualserver/vsr + basic_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_basic_with_default_jwt_claim_sub + ) + premium_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_premium_no_default_jwt_claim_sub + ) + bronze_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_bronze_with_default_jwt_claim_sub + ) + silver_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_silver_no_default_jwt_claim_sub + ) + gold_pol_name = apply_and_assert_valid_policy( + kube_apis, v_s_route_setup.route_m.namespace, rl_pol_gold_no_default_jwt_claim_sub + ) + + # patch vsr with bronze/silver/gold tier policies + apply_and_assert_valid_vsr( + kube_apis, + v_s_route_setup.route_m.namespace, + v_s_route_setup.route_m.name, + rl_vsr_bronze_silver_gold_jwt_claim_sub, + ) + + # patch vs with basic/premium policies + apply_and_assert_valid_vs( + kube_apis, + v_s_route_setup.namespace, + v_s_route_setup.vs_name, + src, + ) + + basic_jwt_token = jwt.encode( + {"user_details": {"level": "Basic"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + premium_jwt_token = jwt.encode( + {"user_details": {"level": "Premium"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + bronze_jwt_token = jwt.encode( + {"user_details": {"tier": "Bronze"}, "sub": "client1"}, + "nginx", + algorithm="HS256", + ) + silver_jwt_token = jwt.encode( + {"user_details": {"tier": "Silver"}, "sub": "client2"}, + "nginx", + algorithm="HS256", + ) + gold_jwt_token = jwt.encode( + {"user_details": {"tier": "Gold"}, "sub": "client3"}, + "nginx", + algorithm="HS256", + ) + + req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}" + + ##  Test Basic Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {basic_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Premium Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {premium_jwt_token}"}, + ) + wait_before_test(1) + + ##  Test Basic Default Rate Limit 1r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[1]}", + 200, + 1, + headers={"host": v_s_route_setup.vs_host}, + ) + wait_before_test(1) + + ##  Test Bronze Rate Limit 5r/s + self.check_rate_limit_nearly_eq( + f"{req_url}{v_s_route_setup.route_m.paths[0]}", + 200, + 5, + headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {bronze_jwt_token}"}, + ) + 
wait_before_test(1)
+
+        ##  Test Silver Rate Limit 10r/s
+        self.check_rate_limit_nearly_eq(
+            f"{req_url}{v_s_route_setup.route_m.paths[0]}",
+            200,
+            10,
+            headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {silver_jwt_token}"},
+        )
+        wait_before_test(1)
+
+        ##  Test Gold Rate Limit 15r/s
+        self.check_rate_limit_nearly_eq(
+            f"{req_url}{v_s_route_setup.route_m.paths[0]}",
+            200,
+            15,
+            headers={"host": v_s_route_setup.vs_host, "Authorization": f"Bearer {gold_jwt_token}"},
+        )
+        wait_before_test(1)
+
+        ##  Test Bronze Default Rate Limit 5r/s
+        self.check_rate_limit_nearly_eq(
+            f"{req_url}{v_s_route_setup.route_m.paths[0]}",
+            200,
+            5,
+            headers={"host": v_s_route_setup.vs_host},
+        )
+
+        delete_policy(kube_apis.custom_objects, basic_pol_name, v_s_route_setup.route_m.namespace)
+        delete_policy(kube_apis.custom_objects, premium_pol_name, v_s_route_setup.route_m.namespace)
+        delete_policy(kube_apis.custom_objects, bronze_pol_name, v_s_route_setup.route_m.namespace)
+        delete_policy(kube_apis.custom_objects, silver_pol_name, v_s_route_setup.route_m.namespace)
+        delete_policy(kube_apis.custom_objects, gold_pol_name, v_s_route_setup.route_m.namespace)
+        self.restore_default_vsr(kube_apis, v_s_route_setup)
+        delete_and_create_vs_from_yaml(
+            kube_apis.custom_objects, v_s_route_setup.vs_name, std_vs_src, v_s_route_setup.namespace
+        )
+
+    @pytest.mark.skip_for_nginx_oss
+    @pytest.mark.parametrize("src", [rl_vsr_basic_premium_jwt_claim_sub])
+    def test_rl_duplicate_default_policy_tiered_basic_premium_with_default_jwt_claim_sub_vsr(
+        self,
+        kube_apis,
+        ingress_controller_prerequisites,
+        crd_ingress_controller,
+        v_s_route_app_setup,
+        v_s_route_setup,
+        test_namespace,
+        src,
+    ):
+        """
+        Test that when both a basic and a premium rate-limiting policy are the default for the tier,
+        the VirtualServerRoute goes into a Warning state and emits a Warning Event.
+        Policies are applied at the VirtualServerRoute level
+        """
+        basic_pol_name = apply_and_assert_valid_policy(
+            kube_apis, v_s_route_setup.route_m.namespace, rl_pol_basic_with_default_jwt_claim_sub
+        )
+        premium_pol_name = apply_and_assert_valid_policy(
+            kube_apis, v_s_route_setup.route_m.namespace, rl_pol_premium_with_default_jwt_claim_sub
+        )
+
+        # Patch VirtualServerRoute
+        apply_and_assert_warning_vsr(
+            kube_apis,
+            v_s_route_setup.route_m.namespace,
+            v_s_route_setup.route_m.name,
+            src,
+        )
+
+        # Assert that the 'AddedOrUpdatedWithWarning' event is present
         assert (
-            policy_info["status"]
-            and policy_info["status"]["reason"] == "AddedOrUpdated"
-            and policy_info["status"]["state"] == "Valid"
+            wait_for_event(
+                kube_apis.v1,
+                f"Tiered rate-limit Policies on [{v_s_route_setup.route_m.namespace}/{v_s_route_setup.route_m.name}] contain conflicting default values",
+                v_s_route_setup.route_m.namespace,
+                30,
+            )
+            is True
         )
+
+        delete_policy(kube_apis.custom_objects, basic_pol_name, v_s_route_setup.route_m.namespace)
+        delete_policy(kube_apis.custom_objects, premium_pol_name, v_s_route_setup.route_m.namespace)
+        self.restore_default_vsr(kube_apis, v_s_route_setup)
diff --git a/tests/suite/utils/custom_assertions.py b/tests/suite/utils/custom_assertions.py
index 6cfc926886..e82c4924fa 100644
--- a/tests/suite/utils/custom_assertions.py
+++ b/tests/suite/utils/custom_assertions.py
@@ -141,7 +141,7 @@ def assert_event(event_text, events_list) -> None:
     for i in range(len(events_list) - 1, -1, -1):
         if event_text in events_list[i].message:
             return
-    pytest.fail(f'Failed to find the event "{event_text}" in the list. Exiting...')
+    pytest.fail(f'Failed to find the event "{event_text}" in {events_list}. Exiting...')


 def assert_event_not_present(event_text, events_list) -> None:
diff --git a/tests/suite/utils/policy_resources_utils.py b/tests/suite/utils/policy_resources_utils.py
index 44531185d1..57a8951cec 100644
--- a/tests/suite/utils/policy_resources_utils.py
+++ b/tests/suite/utils/policy_resources_utils.py
@@ -6,7 +6,7 @@
 from kubernetes.client import CustomObjectsApi
 from kubernetes.client.rest import ApiException
 from suite.utils.custom_resources_utils import read_custom_resource
-from suite.utils.resources_utils import ensure_item_removal
+from suite.utils.resources_utils import ensure_item_removal, wait_before_test


 def read_policy(custom_objects: CustomObjectsApi, namespace, name) -> object:
@@ -58,3 +58,16 @@ def delete_policy(custom_objects: CustomObjectsApi, name, namespace) -> None:
         name,
     )
     print(f"Policy was removed with name '{name}'")
+
+
+def apply_and_assert_valid_policy(kube_apis, namespace, policy_yaml):
+    pol_name = create_policy_from_yaml(kube_apis.custom_objects, policy_yaml, namespace)
+    wait_before_test(1)
+    policy_info = read_custom_resource(kube_apis.custom_objects, namespace, "policies", pol_name)
+    assert (
+        "status" in policy_info
+        and policy_info["status"]["reason"] == "AddedOrUpdated"
+        and policy_info["status"]["state"] == "Valid"
+    )
+
+    return pol_name
diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py
index 06b3f0530d..e7f0948425 100644
--- a/tests/suite/utils/resources_utils.py
+++ b/tests/suite/utils/resources_utils.py
@@ -1744,6 +1744,28 @@ def get_events(v1: CoreV1Api, namespace) -> []:
     return res.items


+def wait_for_event(v1: CoreV1Api, text, namespace, retry=30, interval=1) -> bool:
+    """
+    Wait for an event containing the given text on an object in a namespace.
+
+    :param v1: CoreV1Api
+    :param text: event text
+    :param namespace: object namespace
+    :param retry: maximum number of polling attempts
+    :param interval: seconds to wait between attempts
+    :return: True if the event was found, False otherwise
+    """
+    c = 0
+    while c < retry:
+        events = get_events(v1, namespace)
+        for i in range(len(events) - 1, -1, -1):
+            if text in events[i].message:
+                return True
+        wait_before_test(interval)
+        c += 1
+    return False
+
+
 def ensure_response_from_backend(req_url, host, additional_headers=None, check404=False, sni=False) -> None:
     """
     Wait for 502|504|404 to disappear.
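Taken together, the new helpers reduce each test to "apply, assert status, poll for the event". A minimal sketch of how they compose, assuming the suite's kube_apis and test_namespace fixtures; the manifest path and event text below are illustrative only, not taken from this change:

from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, delete_policy
from suite.utils.resources_utils import wait_for_event


def test_policy_event_sketch(kube_apis, test_namespace):
    # Create the Policy and block until its status reports reason=AddedOrUpdated, state=Valid.
    pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, "rate-limit-policy.yaml")

    # Poll the namespace events (up to 30 retries, 1s apart) for the expected message;
    # wait_for_event returns True on a match and False if the event never appears.
    assert wait_for_event(kube_apis.v1, "AddedOrUpdated", test_namespace, 30)

    delete_policy(kube_apis.custom_objects, pol_name, test_namespace)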
diff --git a/tests/suite/utils/vs_vsr_resources_utils.py b/tests/suite/utils/vs_vsr_resources_utils.py index ddf59ed618..228eb5023e 100644 --- a/tests/suite/utils/vs_vsr_resources_utils.py +++ b/tests/suite/utils/vs_vsr_resources_utils.py @@ -196,7 +196,89 @@ def patch_v_s_route_from_yaml(custom_objects: CustomObjectsApi, name, yaml_manif raise -def get_vs_nginx_template_conf(v1: CoreV1Api, vs_namespace, vs_name, pod_name, pod_namespace) -> str: +def apply_and_assert_valid_vsr(kube_apis, namespace, name, vsr_yaml): + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, + name, + vsr_yaml, + namespace, + ) + wait_before_test(1) + vsr_info = read_custom_resource( + kube_apis.custom_objects, + namespace, + "virtualserverroutes", + name, + ) + assert ( + vsr_info["status"] + and vsr_info["status"]["reason"] == "AddedOrUpdated" + and vsr_info["status"]["state"] == "Valid" + ), vsr_info + + +def apply_and_assert_warning_vsr(kube_apis, namespace, name, vsr_yaml): + delete_and_create_v_s_route_from_yaml( + kube_apis.custom_objects, + name, + vsr_yaml, + namespace, + ) + wait_before_test(1) + vsr_info = read_custom_resource( + kube_apis.custom_objects, + namespace, + "virtualserverroutes", + name, + ) + assert ( + vsr_info["status"] + and vsr_info["status"]["reason"] == "AddedOrUpdatedWithWarning" + and vsr_info["status"]["state"] == "Warning" + ), vsr_info + + +def apply_and_assert_valid_vs(kube_apis, namespace, name, vs_yaml): + patch_virtual_server_from_yaml( + kube_apis.custom_objects, + name, + vs_yaml, + namespace, + ) + wait_before_test(1) + vs_info = read_custom_resource( + kube_apis.custom_objects, + namespace, + "virtualservers", + name, + ) + assert ( + vs_info["status"] and vs_info["status"]["reason"] == "AddedOrUpdated" and vs_info["status"]["state"] == "Valid" + ), vs_info + + +def apply_and_assert_warning_vs(kube_apis, namespace, name, vs_yaml): + patch_virtual_server_from_yaml( + kube_apis.custom_objects, + name, + vs_yaml, + namespace, + ) + wait_before_test(1) + vs_info = read_custom_resource( + kube_apis.custom_objects, + namespace, + "virtualservers", + name, + ) + assert ( + vs_info["status"] + and vs_info["status"]["reason"] == "AddedOrUpdatedWithWarning" + and vs_info["status"]["state"] == "Warning" + ), vs_info + + +def get_vs_nginx_template_conf(v1: CoreV1Api, vs_namespace, vs_name, pod_name, pod_namespace, print_log=True) -> str: """ Get contents of /etc/nginx/conf.d/vs_{namespace}_{vs_name}.conf in the pod. @@ -205,10 +287,11 @@ def get_vs_nginx_template_conf(v1: CoreV1Api, vs_namespace, vs_name, pod_name, p :param vs_name: :param pod_name: :param pod_namespace: + :param print_log: :return: str """ file_path = f"/etc/nginx/conf.d/vs_{vs_namespace}_{vs_name}.conf" - return get_file_contents(v1, file_path, pod_name, pod_namespace) + return get_file_contents(v1, file_path, pod_name, pod_namespace, print_log) def create_v_s_route_from_yaml(custom_objects: CustomObjectsApi, yaml_manifest, namespace) -> str:
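The vs_vsr_resources_utils helpers follow the same pattern for VirtualServer and VirtualServerRoute resources. A minimal sketch of a test built on them, assuming the suite's kube_apis, virtual_server_setup and test_namespace fixtures; the manifest paths are illustrative placeholders, not files from this change:

from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, delete_policy
from suite.utils.vs_vsr_resources_utils import apply_and_assert_valid_vs, apply_and_assert_warning_vs


def test_tiered_rate_limit_sketch(kube_apis, virtual_server_setup, test_namespace):
    # Policies first, so the VirtualServer that references them can become Valid.
    pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, "rate-limit-basic.yaml")

    # Patch the VirtualServer and assert reason=AddedOrUpdated, state=Valid.
    apply_and_assert_valid_vs(
        kube_apis,
        virtual_server_setup.namespace,
        virtual_server_setup.vs_name,
        "virtual-server-tiered.yaml",
    )

    # A conflicting configuration (for example two default policies in one tier) would instead
    # be checked with apply_and_assert_warning_vs, which expects state=Warning.

    delete_policy(kube_apis.custom_objects, pol_name, test_namespace)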