Add test for inbound policy index deletion (#12143)

PR https://github.com/linkerd/linkerd2/pull/12088 fixed an issue where removing and then re-adding certain policy resources could leave the policy index in an incorrect state. We add a test for the specific condition that triggered this behavior to guard against future regressions.

Verified that this test fails prior to https://github.com/linkerd/linkerd2/pull/12088 but passes on main.

Signed-off-by: Alex Leong <alex@buoyant.io>
Author: Alex Leong <alex@buoyant.io>
Date: 2024-03-07 09:03:28 -08:00 (committed by GitHub)
Parent: 179edce222
Commit: 83be0dd086
3 changed files with 188 additions and 0 deletions


@@ -1256,6 +1256,7 @@ dependencies = [
"futures",
"http",
"k8s-gateway-api",
"k8s-openapi",
"kube",
"kubert",
"linkerd-policy-controller-core",


@@ -26,6 +26,7 @@ tracing = "0.1"
[dev-dependencies]
chrono = { version = "0.4", default-features = false }
k8s-openapi = { version = "0.20", features = ["schemars"] }
maplit = "1"
tokio-stream = "0.1"
tokio-test = "0.4"


@@ -1,4 +1,6 @@
use super::*;
use k8s_openapi::apimachinery::pkg::apis::meta::v1 as metav1;
use linkerd_policy_controller_core::{http_route, inbound};

#[test]
fn links_authorization_policy_with_mtls_name() {
@@ -264,6 +266,129 @@ fn links_authorization_policy_with_service_account() {
);
}

#[test]
fn authorization_policy_prevents_index_deletion() {
let test = TestConfig::default();
// Create policy resources: Server, HttpRoute, AuthorizationPolicy, and NetworkAuthentication.
let server = mk_server(
"ns-0",
"srv-8080",
Port::Number(8080.try_into().unwrap()),
None,
Some(("app", "app-0")),
Some(k8s::policy::server::ProxyProtocol::Http1),
);
test.index.write().apply(server.clone());
let authz = ClientAuthorization {
networks: vec!["10.0.0.0/8".parse::<IpNet>().unwrap().into()],
authentication: ClientAuthentication::TlsAuthenticated(vec![IdentityMatch::Exact(
"foo.ns-0.serviceaccount.identity.linkerd.cluster.example.com".to_string(),
)]),
};
let authz_policy = k8s::policy::AuthorizationPolicy {
metadata: k8s::ObjectMeta {
namespace: Some("ns-0".to_string()),
name: Some("authz-foo".to_string()),
..Default::default()
},
spec: k8s::policy::AuthorizationPolicySpec {
target_ref: LocalTargetRef {
group: Some("policy.linkerd.io".to_string()),
kind: "HTTPRoute".to_string(),
name: "route-foo".to_string(),
},
required_authentication_refs: vec![
NamespacedTargetRef {
group: Some("policy.linkerd.io".to_string()),
kind: "NetworkAuthentication".to_string(),
name: "net-foo".to_string(),
namespace: None,
},
NamespacedTargetRef {
group: None,
kind: "ServiceAccount".to_string(),
namespace: Some("ns-0".to_string()),
name: "foo".to_string(),
},
],
},
};
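// Note that the AuthorizationPolicy is indexed before the HTTPRoute it targets exists.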
test.index.write().apply(authz_policy.clone());
let route = mk_http_route("ns-0", "route-foo", "srv-8080");
test.index.write().apply(route.clone());
let net_authn = mk_network_authentication(
"ns-0".to_string(),
"net-foo".to_string(),
vec![k8s::policy::network_authentication::Network {
cidr: "10.0.0.0/8".parse().unwrap(),
except: None,
}],
);
test.index.write().apply(net_authn.clone());
// Now we delete the Server and the HTTPRoute.
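// Fully-qualified syntax selects the right `IndexNamespacedResource` impl, since
// `delete` takes only a namespace and name and `Index` indexes several resource types.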
<Index as kubert::index::IndexNamespacedResource<k8s::policy::Server>>::delete(
&mut test.index.write(),
"ns-0".to_string(),
"srv-8080".to_string(),
);
<Index as kubert::index::IndexNamespacedResource<k8s::policy::HttpRoute>>::delete(
&mut test.index.write(),
"ns-0".to_string(),
"route-foo".to_string(),
);
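// Before linkerd/linkerd2#12088, deleting these resources while the AuthorizationPolicy
// and NetworkAuthentication still referenced them could leave the index in an
// incorrect state.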
// Recreate the pod, server, and HTTPRoute.
test.index.write().apply(server);
test.index.write().apply(route);
let mut pod = mk_pod("ns-0", "pod-0", Some(("container-0", None)));
pod.labels_mut()
.insert("app".to_string(), "app-0".to_string());
test.index.write().apply(pod.clone());
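// Fetch the inbound-server watch for pod-0's port 8080; the recreated Server
// selects the pod through its `app: app-0` label.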
let rx = test
.index
.write()
.pod_server_rx("ns-0", "pod-0", 8080.try_into().unwrap())
.expect("pod-0.ns-0 should exist");
// The HTTPRoute and its AuthorizationPolicy should still apply to the recreated Server.
assert_eq!(
*rx.borrow(),
InboundServer {
reference: ServerRef::Server("srv-8080".to_string()),
authorizations: Default::default(),
protocol: ProxyProtocol::Http1,
http_routes: hashmap!(HttpRouteRef::Linkerd(http_route::GroupKindName{
group: "policy.linkerd.io".into(),
kind: "HTTPRoute".into(),
name: "route-foo".into(),
}) => HttpRoute {
rules: vec![inbound::HttpRouteRule {
matches: vec![http_route::HttpRouteMatch {
path: Some(http_route::PathMatch::Prefix("/foo".to_string())),
headers: vec![],
query_params: vec![],
method: None,
}],
filters: vec![],
}],
authorizations: hashmap!(
AuthorizationRef::AuthorizationPolicy("authz-foo".to_string()) => authz.clone()
)
.into_iter()
.collect(),
..Default::default()
})
.into_iter()
.collect(),
},
);
}

fn mk_authorization_policy(
ns: impl ToString,
name: impl ToString,
@@ -339,3 +464,64 @@ fn mk_network_authentication(
},
}
}
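// Builds an HTTPRoute in `ns` attached to the given Server, with a single "/foo"
// prefix match and an Accepted status condition for that parent.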
fn mk_http_route(
ns: impl ToString,
name: impl ToString,
server: impl ToString,
) -> k8s::policy::HttpRoute {
k8s::policy::HttpRoute {
metadata: k8s::ObjectMeta {
namespace: Some(ns.to_string()),
name: Some(name.to_string()),
..Default::default()
},
spec: k8s::policy::HttpRouteSpec {
inner: k8s_gateway_api::CommonRouteSpec {
parent_refs: Some(vec![k8s_gateway_api::ParentReference {
group: Some("policy.linkerd.io".to_string()),
kind: Some("Server".to_string()),
namespace: Some(ns.to_string()),
name: server.to_string(),
section_name: None,
port: None,
}]),
},
rules: Some(vec![k8s::policy::httproute::HttpRouteRule {
matches: Some(vec![k8s::policy::httproute::HttpRouteMatch {
path: Some(k8s_gateway_api::HttpPathMatch::PathPrefix {
value: "/foo".to_string(),
}),
..Default::default()
}]),
filters: None,
backend_refs: None,
timeouts: None,
}]),
..Default::default()
},
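// Mark the route as accepted by its parent Server in the status.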
status: Some(k8s::policy::httproute::HttpRouteStatus {
inner: k8s_gateway_api::RouteStatus {
parents: vec![k8s_gateway_api::RouteParentStatus {
conditions: vec![metav1::Condition {
type_: "Accepted".to_string(),
status: "True".to_string(),
message: String::new(),
reason: String::new(),
last_transition_time: metav1::Time(chrono::Utc::now()),
observed_generation: None,
}],
parent_ref: k8s_gateway_api::ParentReference {
group: Some("policy.linkerd.io".to_string()),
kind: Some("Server".to_string()),
namespace: Some(ns.to_string()),
name: server.to_string(),
section_name: None,
port: None,
},
controller_name: "linkerd.io/policy-controller".to_string(),
}],
},
}),
}
}