Commit abfdd69

fix(test): ensure that the controller sets rate limit status (#13377)
The http_local_rate_limit_policy test creates a resource with a status already hydrated, but setting the status is the controller's job. This change updates the test to create the resource without a status and then wait for the status to be set. This should help avoid race conditions in this test in which the API lookup occurs before the controller has observed the resource creation.
1 parent a3f1e29 commit abfdd69
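
The change boils down to the wait-for-condition pattern sketched below. This is only a sketch, not the exact test code: it assumes the create and await_condition helpers from linkerd_policy_test behave as they are used in the diff that follows, and create_and_await_accepted is a hypothetical wrapper name introduced here for illustration.

// Sketch of the pattern this commit adopts; helper behavior is assumed from
// the usage shown in the diff below.
use kube::ResourceExt;
use linkerd_policy_controller_k8s_api as k8s;
use linkerd_policy_test::{await_condition, create};

async fn create_and_await_accepted(
    client: &kube::Client,
    ns: &str,
    policy: k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy,
) {
    // Create the resource with `status: None`; hydrating the status is the
    // controller's job, not the test's.
    let rate_limit = create(client, policy).await;

    // Wait until the controller has observed the resource and written an
    // `Accepted: True` condition, so later API lookups cannot race it.
    await_condition(
        client,
        ns,
        &rate_limit.name_unchecked(),
        |obj: Option<&k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy>| {
            obj.and_then(|o| o.status.as_ref()).map_or(false, |status| {
                status
                    .conditions
                    .iter()
                    .any(|c| c.type_ == "Accepted" && c.status == "True")
            })
        },
    )
    .await
    .expect("rate limit must get a status");
}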

File tree

1 file changed (+25 -24 lines)

policy-test/tests/inbound_api.rs

Lines changed: 25 additions & 24 deletions
@@ -1,13 +1,10 @@
-use std::time::Duration;
-
 use futures::prelude::*;
-use k8s_openapi::chrono;
 use kube::ResourceExt;
 use linkerd_policy_controller_core::{Ipv4Net, Ipv6Net};
 use linkerd_policy_controller_k8s_api as k8s;
 use linkerd_policy_test::{
     assert_default_all_unauthenticated_labels, assert_is_default_all_unauthenticated,
-    assert_protocol_detect, create, create_ready_pod, grpc, with_temp_ns,
+    assert_protocol_detect, await_condition, create, create_ready_pod, grpc, with_temp_ns,
 };
 use maplit::{btreemap, convert_args, hashmap};
 use tokio::time;
@@ -332,7 +329,7 @@ async fn http_local_rate_limit_policy() {
         .await;
 
         // Create a rate-limit policy associated to the server
-        create(
+        let rate_limit = create(
             &client,
             k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy {
                 metadata: k8s::ObjectMeta {
@@ -356,25 +353,29 @@ async fn http_local_rate_limit_policy() {
                         }],
                     }]),
                 },
-                status: Some(k8s::policy::HttpLocalRateLimitPolicyStatus {
-                    conditions: vec![k8s::Condition {
-                        last_transition_time: k8s::Time(chrono::DateTime::<chrono::Utc>::MIN_UTC),
-                        message: "".to_string(),
-                        observed_generation: None,
-                        reason: "".to_string(),
-                        status: "True".to_string(),
-                        type_: "Accepted".to_string(),
-                    }],
-                    target_ref: k8s::policy::LocalTargetRef {
-                        group: Some("policy.linkerd.io".to_string()),
-                        kind: "Server".to_string(),
-                        name: "linkerd-admin".to_string(),
-                    },
-                }),
+                status: None,
             },
         )
         .await;
 
+        await_condition(
+            &client,
+            &ns,
+            &rate_limit.name_unchecked(),
+            |obj: Option<&k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy>| {
+                obj.as_ref().map_or(false, |obj| {
+                    obj.status.as_ref().map_or(false, |status| {
+                        status
+                            .conditions
+                            .iter()
+                            .any(|c| c.type_ == "Accepted" && c.status == "True")
+                    })
+                })
+            },
+        )
+        .await
+        .expect("rate limit must get a status");
+
         let client_id = format!("sa-0.{}.serviceaccount.identity.linkerd.cluster.local", ns);
         let ratelimit_overrides = vec![(200, vec![client_id])];
         let ratelimit =
@@ -609,23 +610,23 @@ async fn http_routes_ordered_by_creation() {
         // Creation timestamps in Kubernetes only have second precision, so we
         // must wait a whole second between creating each of these routes in
         // order for them to have different creation timestamps.
-        tokio::time::sleep(Duration::from_secs(1)).await;
+        time::sleep(time::Duration::from_secs(1)).await;
         create(
             &client,
             mk_admin_route_with_path(ns.as_ref(), "a", "/ready"),
         )
         .await;
         next_config(&mut rx).await;
 
-        tokio::time::sleep(Duration::from_secs(1)).await;
+        time::sleep(time::Duration::from_secs(1)).await;
         create(
             &client,
             mk_admin_route_with_path(ns.as_ref(), "c", "/shutdown"),
         )
         .await;
         next_config(&mut rx).await;
 
-        tokio::time::sleep(Duration::from_secs(1)).await;
+        time::sleep(time::Duration::from_secs(1)).await;
         create(
             &client,
             mk_admin_route_with_path(ns.as_ref(), "b", "/proxy-log-level"),
@@ -815,7 +816,7 @@ async fn retry_watch_server(
             Ok(rx) => return rx,
            Err(error) => {
                tracing::error!(?error, ns, pod_name, "failed to watch policy for port 4191");
-                time::sleep(Duration::from_secs(1)).await;
+                time::sleep(time::Duration::from_secs(1)).await;
            }
        }
    }
