Audit access policy implementation (#12846)

Follow-up to #12845

This expands the policy controller index in the following ways:

- Adds the new Audit variant to the DefaultPolicy enum.
- Expands the function that synthesizes the authorizations for a given default policy (DefaultPolicy::default_authzs) so that it also creates an Unauthenticated client auth and an allow-all NetworkMatch for the new Audit default policy (see the sketch after this list).
- Now that a Server can have a default policy other than Deny, generating InboundServer authorizations (PolicyIndex::client_authzs) appends the default authorizations whenever DefaultPolicy is Allow or Audit.
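For reference, the authorization synthesized for the Audit policy is equivalent to the minimal sketch below; the actual construction lives in DefaultPolicy::default_client_authz in the diff, and the import paths here are assumptions about where the index's types live.

```rust
// Assumed paths: these types come from the policy controller's core crate.
use linkerd_policy_controller_core::{ClientAuthentication, ClientAuthorization, IpNet};

// Sketch only: Audit synthesizes a single authorization that admits any client identity
// from any source network, so traffic is let through but gets audited.
fn audit_authz() -> ClientAuthorization {
    ClientAuthorization {
        // No client authentication is required.
        authentication: ClientAuthentication::Unauthenticated,
        // Match all IPv4 and IPv6 source addresses.
        networks: vec![
            "0.0.0.0/0".parse::<IpNet>().unwrap().into(),
            "::/0".parse::<IpNet>().unwrap().into(),
        ],
    }
}
```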

Also, the admission controller ensures the new accessPolicy field contains a valid value.
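Since accessPolicy is parsed with the same parser as the existing default-policy names, the accepted strings mirror those names plus the new audit value. A minimal sketch of the check (the helper name and crate alias below are hypothetical; the real logic sits in the Validate<ServerSpec> impl in the diff):

```rust
use anyhow::anyhow;
// Assumed crate alias for the policy controller's index crate.
use linkerd_policy_controller_k8s_index as index;

// Hypothetical helper: the field is valid only when it parses into a DefaultPolicy,
// so values such as "all-unauthenticated", "cluster-authenticated", "deny", and now
// "audit" are accepted, while anything else is rejected.
fn validate_access_policy(value: &str) -> anyhow::Result<()> {
    value
        .parse::<index::DefaultPolicy>()
        .map(|_| ())
        .map_err(|err| anyhow!("Invalid 'accessPolicy' field: {err}"))
}
```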

## Tests

New integration tests added:

- e2e_audit.rs exercises the audit policy first on a Server and then at the namespace level (see the annotation sketch after this list).
- in admit_server.rs, a new test checks that invalid accessPolicy values are rejected.
- in inbound_api.rs, server_with_audit_policy verifies that the synthesized audit authorization is returned for a Server with accessPolicy=audit.
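For the namespace-level case, e2e_audit.rs switches the default inbound policy by patching the namespace annotation; a condensed sketch of that step (the function name here is ad hoc, the test's helper is change_access_policy) is:

```rust
use linkerd_policy_controller_k8s_api as k8s;

// Condensed from e2e_audit.rs: patch the namespace so its default inbound policy becomes
// "audit". Pods created (or recreated) afterwards pick up the new default, after which
// unmeshed requests are admitted rather than denied.
async fn set_ns_audit(client: kube::Client, ns: &str) {
    let api = k8s::Api::<k8s::Namespace>::all(client);
    let patch = k8s::Patch::Merge(serde_json::json!({
        "metadata": {
            "annotations": { "config.linkerd.io/default-inbound-policy": "audit" }
        }
    }));
    api.patch(ns, &k8s::PatchParams::default(), &patch)
        .await
        .expect("failed to patch namespace");
}
```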

> [!NOTE]
> Please check linkerd/website#1805 for how this is supposed to work from the user's perspective.
Alejandro Pedraza authored 2024-07-26 13:34:22 -05:00, committed by GitHub
parent aed4850e6c · commit a9fa176dd1
16 changed files with 301 additions and 40 deletions

View File

@ -18,6 +18,7 @@ pub struct ServerSpec {
pub selector: Selector,
pub port: Port,
pub proxy_protocol: Option<ProxyProtocol>,
pub access_policy: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]

View File

@ -21,6 +21,9 @@ pub enum DefaultPolicy {
/// Indicates that all traffic is denied unless explicitly permitted by an authorization policy.
Deny,
/// Indicates that all traffic is let through, but gets audited
Audit,
}
// === impl DefaultPolicy ===
@ -47,6 +50,7 @@ impl std::str::FromStr for DefaultPolicy {
cluster_only: true,
}),
"deny" => Ok(Self::Deny),
"audit" => Ok(Self::Audit),
s => Err(anyhow!("invalid mode: {:?}", s)),
}
}
@ -72,6 +76,7 @@ impl DefaultPolicy {
cluster_only: true,
} => "cluster-unauthenticated",
Self::Deny => "deny",
Self::Audit => "audit",
}
}
@ -80,34 +85,48 @@ impl DefaultPolicy {
config: &ClusterInfo,
) -> HashMap<AuthorizationRef, ClientAuthorization> {
let mut authzs = HashMap::default();
let auth_ref = AuthorizationRef::Default(self.as_str());
if let DefaultPolicy::Allow {
authenticated_only,
cluster_only,
} = self
{
let authentication = if authenticated_only {
ClientAuthentication::TlsAuthenticated(vec![IdentityMatch::Suffix(vec![])])
} else {
ClientAuthentication::Unauthenticated
};
let networks = if cluster_only {
config.networks.iter().copied().map(Into::into).collect()
} else {
vec![
"0.0.0.0/0".parse::<IpNet>().unwrap().into(),
"::/0".parse::<IpNet>().unwrap().into(),
]
};
authzs.insert(
AuthorizationRef::Default(self.as_str()),
ClientAuthorization {
authentication,
networks,
},
auth_ref,
Self::default_client_authz(config, authenticated_only, cluster_only),
);
};
} else if let DefaultPolicy::Audit = self {
authzs.insert(auth_ref, Self::default_client_authz(config, false, false));
}
authzs
}
fn default_client_authz(
config: &ClusterInfo,
authenticated_only: bool,
cluster_only: bool,
) -> ClientAuthorization {
let authentication = if authenticated_only {
ClientAuthentication::TlsAuthenticated(vec![IdentityMatch::Suffix(vec![])])
} else {
ClientAuthentication::Unauthenticated
};
let networks = if cluster_only {
config.networks.iter().copied().map(Into::into).collect()
} else {
vec![
"0.0.0.0/0".parse::<IpNet>().unwrap().into(),
"::/0".parse::<IpNet>().unwrap().into(),
]
};
ClientAuthorization {
authentication,
networks,
}
}
}
impl std::fmt::Display for DefaultPolicy {
@ -140,6 +159,7 @@ mod test {
authenticated_only: false,
cluster_only: true,
},
DefaultPolicy::Audit,
] {
assert_eq!(
default.to_string().parse::<DefaultPolicy>().unwrap(),

View File

@ -1753,6 +1753,10 @@ impl PolicyIndex {
authzs.insert(reference, authz);
}
if let Some(p) = server.access_policy {
authzs.extend(p.default_authzs(&self.cluster_info));
}
authzs
}

View File

@ -1,4 +1,4 @@
use crate::ClusterInfo;
use crate::{ClusterInfo, DefaultPolicy};
use linkerd_policy_controller_core::inbound::ProxyProtocol;
use linkerd_policy_controller_k8s_api::{
self as k8s, policy::server::Port, policy::server::Selector,
@ -11,6 +11,7 @@ pub(crate) struct Server {
pub selector: Selector,
pub port_ref: Port,
pub protocol: ProxyProtocol,
pub access_policy: Option<DefaultPolicy>,
}
impl Server {
@ -20,6 +21,7 @@ impl Server {
selector: srv.spec.selector,
port_ref: srv.spec.port,
protocol: proxy_protocol(srv.spec.proxy_protocol, cluster),
access_policy: srv.spec.access_policy.and_then(|p| p.parse().ok()),
}
}
}

View File

@ -45,7 +45,7 @@ struct TestConfig {
_tracing: tracing::subscriber::DefaultGuard,
}
const DEFAULTS: [DefaultPolicy; 5] = [
const DEFAULTS: [DefaultPolicy; 6] = [
DefaultPolicy::Deny,
DefaultPolicy::Allow {
authenticated_only: true,
@ -63,6 +63,7 @@ const DEFAULTS: [DefaultPolicy; 5] = [
authenticated_only: false,
cluster_only: true,
},
DefaultPolicy::Audit,
];
pub fn mk_pod_with_containers(
@ -121,6 +122,7 @@ fn mk_server(
port,
selector: k8s::policy::server::Selector::Pod(pod_labels.into_iter().collect()),
proxy_protocol,
access_policy: None,
},
}
}
@ -177,6 +179,13 @@ fn mk_default_policy(
networks: cluster_nets,
},
)),
DefaultPolicy::Audit => Some((
AuthorizationRef::Default("audit"),
ClientAuthorization {
authentication: ClientAuthentication::Unauthenticated,
networks: all_nets,
},
)),
}
.into_iter()
.collect()

View File

@ -109,6 +109,7 @@ fn authenticated_annotated() {
authenticated_only: true,
},
DefaultPolicy::Deny => DefaultPolicy::Deny,
DefaultPolicy::Audit => DefaultPolicy::Audit,
};
InboundServer {
reference: ServerRef::Default(policy.as_str()),

View File

@ -47,6 +47,7 @@ fn make_server(
port,
selector: linkerd_k8s_api::server::Selector::Pod(pod_labels.into_iter().collect()),
proxy_protocol,
access_policy: None,
},
}
}

View File

@ -336,7 +336,8 @@ impl Validate<MeshTLSAuthenticationSpec> for Admission {
#[async_trait::async_trait]
impl Validate<ServerSpec> for Admission {
/// Checks that `spec` doesn't select the same pod/ports as other existing Servers
/// Checks that `spec` doesn't select the same pod/ports as other existing Servers, and that
/// `accessPolicy` contains a valid value
//
// TODO(ver) this isn't rigorous about detecting servers that select the same port if one port
// specifies a numeric port and the other specifies the port's name.
@ -369,6 +370,12 @@ impl Validate<ServerSpec> for Admission {
}
}
if let Some(policy) = spec.access_policy {
policy
.parse::<index::DefaultPolicy>()
.map_err(|err| anyhow!("Invalid 'accessPolicy' field: {err}"))?;
}
Ok(())
}
}

View File

@ -33,7 +33,7 @@ pub fn pod(ns: &str) -> k8s::Pod {
}
}
pub fn server(ns: &str) -> k8s::policy::Server {
pub fn server(ns: &str, access_policy: Option<String>) -> k8s::policy::Server {
k8s::policy::Server {
metadata: k8s::ObjectMeta {
namespace: Some(ns.to_string()),
@ -46,6 +46,7 @@ pub fn server(ns: &str) -> k8s::policy::Server {
)))),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy,
},
}
}

View File

@ -16,6 +16,7 @@ async fn accepts_valid() {
selector: Selector::Pod(api::labels::Selector::default()),
port: Port::Number(80.try_into().unwrap()),
proxy_protocol: None,
access_policy: None,
},
})
.await;
@ -34,6 +35,7 @@ async fn accepts_server_updates() {
selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))),
port: Port::Number(80.try_into().unwrap()),
proxy_protocol: None,
access_policy: None,
},
};
@ -60,6 +62,7 @@ async fn rejects_identitical_pod_selector() {
selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))),
port: Port::Number(80.try_into().unwrap()),
proxy_protocol: None,
access_policy: None,
};
let api = kube::Api::namespaced(client, &ns);
@ -106,6 +109,7 @@ async fn rejects_all_pods_selected() {
selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))),
port: Port::Number(80.try_into().unwrap()),
proxy_protocol: Some(ProxyProtocol::Http2),
access_policy: None,
},
};
api.create(&kube::api::PostParams::default(), &test0)
@ -123,6 +127,7 @@ async fn rejects_all_pods_selected() {
port: Port::Number(80.try_into().unwrap()),
// proxy protocol doesn't factor into the selection
proxy_protocol: Some(ProxyProtocol::Http1),
access_policy: None,
},
};
api.create(&kube::api::PostParams::default(), &test1)
@ -179,3 +184,21 @@ async fn rejects_invalid_proxy_protocol() {
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn rejects_invalid_access_policy() {
admission::rejects(|ns| Server {
metadata: api::ObjectMeta {
namespace: Some(ns),
name: Some("test".to_string()),
..Default::default()
},
spec: ServerSpec {
selector: Selector::Pod(api::labels::Selector::default()),
port: Port::Number(80.try_into().unwrap()),
proxy_protocol: None,
access_policy: Some("foobar".to_string()),
},
})
.await;
}

View File

@ -0,0 +1,123 @@
use kube::{Client, ResourceExt};
use linkerd_policy_controller_k8s_api as k8s;
use linkerd_policy_test::{create, create_ready_pod, curl, web, with_temp_ns, LinkerdInject};
#[tokio::test(flavor = "current_thread")]
async fn server_audit() {
with_temp_ns(|client, ns| async move {
// Create a server with no policy that should block traffic to the associated pod
let srv = create(&client, web::server(&ns, None)).await;
// Create the web pod and wait for it to be ready.
tokio::join!(
create(&client, web::service(&ns)),
create_ready_pod(&client, web::pod(&ns))
);
// All requests should fail
let curl = curl::Runner::init(&client, &ns).await;
let (injected, uninjected) = tokio::join!(
curl.run("curl-injected", "http://web", LinkerdInject::Enabled),
curl.run("curl-uninjected", "http://web", LinkerdInject::Disabled),
);
let (injected_status, uninjected_status) =
tokio::join!(injected.exit_code(), uninjected.exit_code());
assert_ne!(injected_status, 0, "injected curl must fail");
assert_ne!(uninjected_status, 0, "uninjected curl must fail");
// Patch the server with accessPolicy audit
let patch = serde_json::json!({
"spec": {
"accessPolicy": "audit",
}
});
let patch = k8s::Patch::Merge(patch);
let api = k8s::Api::<k8s::policy::Server>::namespaced(client.clone(), &ns);
api.patch(&srv.name_unchecked(), &k8s::PatchParams::default(), &patch)
.await
.expect("failed to patch server");
// All requests should succeed
let (injected, uninjected) = tokio::join!(
curl.run("curl-audit-injected", "http://web", LinkerdInject::Enabled),
curl.run(
"curl-audit-uninjected",
"http://web",
LinkerdInject::Disabled
),
);
let (injected_status, uninjected_status) =
tokio::join!(injected.exit_code(), uninjected.exit_code());
assert_eq!(injected_status, 0, "injected curl must contact web");
assert_eq!(uninjected_status, 0, "uninjected curl must contact web");
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn ns_audit() {
with_temp_ns(|client, ns| async move {
change_access_policy(client.clone(), &ns, "cluster-authenticated").await;
// Create the web pod and wait for it to be ready.
tokio::join!(
create(&client, web::service(&ns)),
create_ready_pod(&client, web::pod(&ns))
);
// Unmeshed requests should fail
let curl = curl::Runner::init(&client, &ns).await;
let (injected, uninjected) = tokio::join!(
curl.run("curl-injected", "http://web", LinkerdInject::Enabled),
curl.run("curl-uninjected", "http://web", LinkerdInject::Disabled),
);
let (injected_status, uninjected_status) =
tokio::join!(injected.exit_code(), uninjected.exit_code());
assert_eq!(injected_status, 0, "injected curl must contact web");
assert_ne!(uninjected_status, 0, "uninjected curl must fail");
change_access_policy(client.clone(), &ns, "audit").await;
// Recreate pod for it to pick the new default policy
let api = kube::Api::<k8s::api::core::v1::Pod>::namespaced(client.clone(), &ns);
kube::runtime::wait::delete::delete_and_finalize(
api,
"web",
&kube::api::DeleteParams::foreground(),
)
.await
.expect("web pod must be deleted");
create_ready_pod(&client, web::pod(&ns)).await;
// All requests should work
let (injected, uninjected) = tokio::join!(
curl.run("curl-audit-injected", "http://web", LinkerdInject::Enabled),
curl.run(
"curl-audit-uninjected",
"http://web",
LinkerdInject::Disabled
),
);
let (injected_status, uninjected_status) =
tokio::join!(injected.exit_code(), uninjected.exit_code());
assert_eq!(injected_status, 0, "injected curl must contact web");
assert_eq!(uninjected_status, 0, "uninject curl must contact web");
})
.await;
}
async fn change_access_policy(client: Client, ns: &str, policy: &str) {
let api = k8s::Api::<k8s::Namespace>::all(client.clone());
let patch = serde_json::json!({
"metadata": {
"annotations": {
"config.linkerd.io/default-inbound-policy": policy,
}
}
});
let patch = k8s::Patch::Merge(patch);
api.patch(ns, &k8s::PatchParams::default(), &patch)
.await
.expect("failed to patch namespace");
}

View File

@ -16,7 +16,7 @@ async fn meshtls() {
//
// The policy requires that all connections are authenticated with MeshTLS.
let (srv, all_mtls) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, all_authenticated(&ns))
);
create(
@ -60,7 +60,7 @@ async fn targets_route() {
//
// The policy requires that all connections are authenticated with MeshTLS.
let (srv, all_mtls) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, all_authenticated(&ns)),
);
// Create a route which matches the /allowed path.
@ -171,7 +171,7 @@ async fn targets_namespace() {
//
// The policy requires that all connections are authenticated with MeshTLS.
let (_srv, all_mtls) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, all_authenticated(&ns))
);
create(
@ -220,7 +220,7 @@ async fn meshtls_namespace() {
// The policy requires that all connections are authenticated with MeshTLS
// and come from service accounts in the given namespace.
let (srv, mtls_ns) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, ns_authenticated(&ns))
);
create(
@ -277,7 +277,7 @@ async fn network() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let (srv, allow_ips) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, allow_ips(&ns, Some(blessed_ip)))
);
create(
@ -351,7 +351,7 @@ async fn both() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let (srv, allow_ips, all_mtls) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(
&client,
allow_ips(&ns, vec![blessed_injected_ip, blessed_uninjected_ip]),
@ -451,7 +451,7 @@ async fn either() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let (srv, allow_ips, all_mtls) = tokio::join!(
create(&client, web::server(&ns)),
create(&client, web::server(&ns, None)),
create(&client, allow_ips(&ns, vec![blessed_uninjected_ip])),
create(&client, all_authenticated(&ns))
);
@ -528,7 +528,7 @@ async fn either() {
async fn empty_authentications() {
with_temp_ns(|client, ns| async move {
// Create a policy that does not require any authentications.
let srv = create(&client, web::server(&ns)).await;
let srv = create(&client, web::server(&ns, None)).await;
create(
&client,
authz_policy(&ns, "web", LocalTargetRef::from_resource(&srv), None),

View File

@ -9,7 +9,7 @@ use linkerd_policy_test::{
#[tokio::test(flavor = "current_thread")]
async fn meshtls() {
with_temp_ns(|client, ns| async move {
let srv = create(&client, web::server(&ns)).await;
let srv = create(&client, web::server(&ns, None)).await;
create(
&client,
@ -66,7 +66,7 @@ async fn network() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let srv = create(&client, web::server(&ns)).await;
let srv = create(&client, web::server(&ns, None)).await;
create(
&client,
server_authz(
@ -143,7 +143,7 @@ async fn both() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let srv = create(&client, web::server(&ns)).await;
let srv = create(&client, web::server(&ns, None)).await;
create(
&client,
server_authz(
@ -243,7 +243,7 @@ async fn either() {
// Once we know the IP of the (blocked) pod, create an web
// authorization policy that permits connections from this pod.
let srv = create(&client, web::server(&ns)).await;
let srv = create(&client, web::server(&ns, None)).await;
tokio::join!(
create(
&client,

View File

@ -32,7 +32,7 @@ async fn server_with_server_authorization() {
// Create a server that selects the pod's proxy admin server and ensure
// that the update now uses this server, which has no authorizations
let server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await;
let server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await;
let config = next_config(&mut rx).await;
assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol()));
assert_eq!(config.authorizations, vec![]);
@ -144,7 +144,7 @@ async fn server_with_authorization_policy() {
// Create a server that selects the pod's proxy admin server and ensure
// that the update now uses this server, which has no authorizations
let server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await;
let server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await;
let config = next_config(&mut rx).await;
assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol()));
assert_eq!(config.authorizations, vec![]);
@ -239,6 +239,68 @@ async fn server_with_authorization_policy() {
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn server_with_audit_policy() {
with_temp_ns(|client, ns| async move {
// Create a pod that does nothing. It's injected with a proxy, so we can
// attach policies to its admin server.
let pod = create_ready_pod(&client, mk_pause(&ns, "pause")).await;
let mut rx = retry_watch_server(&client, &ns, &pod.name_unchecked()).await;
let config = rx
.next()
.await
.expect("watch must not fail")
.expect("watch must return an initial config");
tracing::trace!(?config);
assert_is_default_all_unauthenticated!(config);
assert_protocol_detect!(config);
// Create a server with audit access policy that selects the pod's proxy admin server and
// ensure that the update now uses this server, and an unauthenticated authorization is
// returned
let server = create(
&client,
mk_admin_server(&ns, "linkerd-admin", Some("audit".to_string())),
)
.await;
let config = next_config(&mut rx).await;
assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol()));
assert_eq!(config.authorizations.len(), 1);
assert_eq!(
config.authorizations.first().unwrap().labels,
convert_args!(hashmap!(
"group" => "",
"kind" => "default",
"name" => "audit",
))
);
assert_eq!(
*config
.authorizations
.first()
.unwrap()
.authentication
.as_ref()
.unwrap(),
grpc::inbound::Authn {
permit: Some(grpc::inbound::authn::Permit::Unauthenticated(
grpc::inbound::authn::PermitUnauthenticated {}
)),
}
);
assert_eq!(
config.labels,
convert_args!(hashmap!(
"group" => "policy.linkerd.io",
"kind" => "server",
"name" => server.name_unchecked()
))
);
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn server_with_http_route() {
with_temp_ns(|client, ns| async move {
@ -259,7 +321,7 @@ async fn server_with_http_route() {
// Create a server that selects the pod's proxy admin server and ensure
// that the update now uses this server, which has no authorizations
// and no routes.
let _server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await;
let _server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await;
let config = next_config(&mut rx).await;
assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol()));
assert_eq!(config.authorizations, vec![]);
@ -419,7 +481,7 @@ async fn http_routes_ordered_by_creation() {
// Create a server that selects the pod's proxy admin server and ensure
// that the update now uses this server, which has no authorizations
// and no routes.
let _server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await;
let _server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await;
let config = next_config(&mut rx).await;
assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol()));
assert_eq!(config.authorizations, vec![]);
@ -622,7 +684,7 @@ fn mk_pause(ns: &str, name: &str) -> k8s::Pod {
}
}
fn mk_admin_server(ns: &str, name: &str) -> k8s::policy::Server {
fn mk_admin_server(ns: &str, name: &str, access_policy: Option<String>) -> k8s::policy::Server {
k8s::policy::Server {
metadata: k8s::ObjectMeta {
namespace: Some(ns.to_string()),
@ -633,6 +695,7 @@ fn mk_admin_server(ns: &str, name: &str) -> k8s::policy::Server {
selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::default()),
port: k8s::policy::server::Port::Number(4191.try_into().unwrap()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy,
},
}
}

View File

@ -459,6 +459,7 @@ fn mk_http_server(ns: &str, name: &str) -> k8s::policy::Server {
),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
}
}

View File

@ -22,6 +22,7 @@ async fn inbound_accepted_parent() {
)),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
};
let server = create(&client, server).await;
@ -99,6 +100,7 @@ async fn inbound_multiple_parents() {
)),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
};
let _server = create(&client, server).await;
@ -147,6 +149,7 @@ async fn inbound_no_parent_ref_patch() {
)),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
};
let server = create(&client, server).await;
@ -236,6 +239,7 @@ async fn inbound_accepted_reconcile_no_parent() {
)),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
};
create(&client, server).await;
@ -285,6 +289,7 @@ async fn inbound_accepted_reconcile_parent_delete() {
)),
port: k8s::policy::server::Port::Name("http".to_string()),
proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1),
access_policy: None,
},
};
create(&client, server).await;