mirror of https://github.com/linkerd/linkerd2.git
policy: Cleanup policy response labels (#6722)
Policy controller API responses include a set of labels. These labels are to be used in proxy metrics to indicate why traffic is permitted to a pod. This permits metrics to be associated with `Server` and `ServerAuthorization` resources (i.e. for `stat`). This change updates the response API to include a `name` label referencing the server's name. When the policy is derived from a default configuration (and not a `Server` instance), the name takes the form `default:<policy>`. This change also updates authorization labels. Defaults are encoded as servers are; otherwise the authorization's name is set as a label. The `tls` and `authn` labels have been removed, as they're redundant with other labels that are already present.
This commit is contained in:
parent
154ad9a228
commit
49f4af6e6b
|
@ -23,6 +23,7 @@ pub type InboundServerStream = Pin<Box<dyn Stream<Item = InboundServer> + Send +
|
|||
/// Inbound server configuration.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct InboundServer {
|
||||
pub name: String,
|
||||
pub protocol: ProxyProtocol,
|
||||
pub authorizations: HashMap<String, ClientAuthorization>,
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ use linkerd_policy_controller_core::{
|
|||
ClientAuthentication, ClientAuthorization, DiscoverInboundServer, IdentityMatch, InboundServer,
|
||||
InboundServerStream, IpNet, NetworkMatch, ProxyProtocol,
|
||||
};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use std::sync::Arc;
|
||||
use tracing::trace;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
@ -20,12 +20,6 @@ pub struct Server<T> {
|
|||
cluster_networks: Arc<[IpNet]>,
|
||||
}
|
||||
|
||||
struct Labels {
|
||||
authn: bool,
|
||||
tls: bool,
|
||||
name: String,
|
||||
}
|
||||
|
||||
// === impl Server ===
|
||||
|
||||
impl<T> Server<T>
|
||||
|
@ -200,9 +194,15 @@ fn to_server(srv: &InboundServer, cluster_networks: &[IpNet]) -> proto::Server {
|
|||
.collect();
|
||||
trace!(?authorizations);
|
||||
|
||||
let labels = vec![("name".to_string(), srv.name.to_string())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
trace!(?labels);
|
||||
|
||||
proto::Server {
|
||||
protocol: Some(protocol),
|
||||
authorizations,
|
||||
labels,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
@ -233,104 +233,62 @@ fn to_authz(
|
|||
.collect()
|
||||
};
|
||||
|
||||
match authentication {
|
||||
ClientAuthentication::Unauthenticated => {
|
||||
let labels = Labels {
|
||||
authn: false,
|
||||
tls: false,
|
||||
name: name.to_string(),
|
||||
};
|
||||
proto::Authz {
|
||||
networks,
|
||||
labels: labels.into(),
|
||||
authentication: Some(proto::Authn {
|
||||
permit: Some(proto::authn::Permit::Unauthenticated(
|
||||
proto::authn::PermitUnauthenticated {},
|
||||
)),
|
||||
}),
|
||||
}
|
||||
}
|
||||
let labels = vec![("name".to_string(), name.to_string())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
ClientAuthentication::TlsUnauthenticated => {
|
||||
let labels = Labels {
|
||||
authn: false,
|
||||
tls: true,
|
||||
name: name.to_string(),
|
||||
};
|
||||
proto::Authz {
|
||||
networks,
|
||||
labels: labels.into(),
|
||||
authentication: Some(proto::Authn {
|
||||
permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls {
|
||||
clients: Some(proto::authn::permit_mesh_tls::Clients::Unauthenticated(
|
||||
proto::authn::PermitUnauthenticated {},
|
||||
)),
|
||||
})),
|
||||
}),
|
||||
}
|
||||
}
|
||||
let authn = match authentication {
|
||||
ClientAuthentication::Unauthenticated => proto::Authn {
|
||||
permit: Some(proto::authn::Permit::Unauthenticated(
|
||||
proto::authn::PermitUnauthenticated {},
|
||||
)),
|
||||
},
|
||||
|
||||
ClientAuthentication::TlsUnauthenticated => proto::Authn {
|
||||
permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls {
|
||||
clients: Some(proto::authn::permit_mesh_tls::Clients::Unauthenticated(
|
||||
proto::authn::PermitUnauthenticated {},
|
||||
)),
|
||||
})),
|
||||
},
|
||||
|
||||
// Authenticated connections must have TLS and apply to all
|
||||
// networks.
|
||||
ClientAuthentication::TlsAuthenticated(identities) => {
|
||||
let labels = Labels {
|
||||
authn: true,
|
||||
tls: true,
|
||||
name: name.to_string(),
|
||||
};
|
||||
let suffixes = identities
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
IdentityMatch::Suffix(s) => Some(proto::IdentitySuffix { parts: s.to_vec() }),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
|
||||
let authn = {
|
||||
let suffixes = identities
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
IdentityMatch::Suffix(s) => {
|
||||
Some(proto::IdentitySuffix { parts: s.to_vec() })
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
let identities = identities
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
IdentityMatch::Name(n) => Some(proto::Identity {
|
||||
name: n.to_string(),
|
||||
}),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
|
||||
let identities = identities
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
IdentityMatch::Name(n) => Some(proto::Identity {
|
||||
name: n.to_string(),
|
||||
}),
|
||||
_ => None,
|
||||
})
|
||||
.collect();
|
||||
|
||||
proto::Authn {
|
||||
permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls {
|
||||
clients: Some(proto::authn::permit_mesh_tls::Clients::Identities(
|
||||
proto::authn::permit_mesh_tls::PermitClientIdentities {
|
||||
identities,
|
||||
suffixes,
|
||||
},
|
||||
)),
|
||||
})),
|
||||
}
|
||||
};
|
||||
|
||||
proto::Authz {
|
||||
networks,
|
||||
labels: labels.into(),
|
||||
authentication: Some(authn),
|
||||
proto::Authn {
|
||||
permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls {
|
||||
clients: Some(proto::authn::permit_mesh_tls::Clients::Identities(
|
||||
proto::authn::permit_mesh_tls::PermitClientIdentities {
|
||||
identities,
|
||||
suffixes,
|
||||
},
|
||||
)),
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// === impl Labels ===
|
||||
|
||||
impl From<Labels> for HashMap<String, String> {
|
||||
fn from(labels: Labels) -> HashMap<String, String> {
|
||||
vec![
|
||||
("authn".to_string(), labels.authn.to_string()),
|
||||
("tls".to_string(), labels.tls.to_string()),
|
||||
("name".to_string(), labels.name),
|
||||
]
|
||||
.into_iter()
|
||||
.collect()
|
||||
};
|
||||
|
||||
proto::Authz {
|
||||
networks,
|
||||
labels,
|
||||
authentication: Some(authn),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -171,6 +171,7 @@ impl DefaultPolicyWatches {
|
|||
}
|
||||
|
||||
DefaultPolicy::Deny => InboundServer {
|
||||
name: "default:deny".to_string(),
|
||||
protocol,
|
||||
authorizations: Default::default(),
|
||||
},
|
||||
|
@ -218,6 +219,7 @@ impl DefaultPolicyWatches {
|
|||
};
|
||||
|
||||
InboundServer {
|
||||
name: name.clone(),
|
||||
protocol,
|
||||
authorizations: Some((name, authz)).into_iter().collect(),
|
||||
}
|
||||
|
|
|
@ -191,6 +191,7 @@ impl SrvIndex {
|
|||
.collect::<HashMap<_, _>>();
|
||||
debug!(authzs = ?authzs.keys());
|
||||
let (tx, rx) = watch::channel(InboundServer {
|
||||
name: entry.key().clone(),
|
||||
protocol: protocol.clone(),
|
||||
authorizations: authzs.clone(),
|
||||
});
|
||||
|
|
|
@ -40,15 +40,13 @@ async fn incrementally_configure_server() {
|
|||
);
|
||||
idx.apply_pod(pod.clone()).unwrap();
|
||||
|
||||
let default = DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: true,
|
||||
};
|
||||
let default_config = InboundServer {
|
||||
authorizations: mk_default_policy(
|
||||
DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: true,
|
||||
},
|
||||
cluster_net,
|
||||
kubelet_ip,
|
||||
),
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(default, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
},
|
||||
|
@ -77,6 +75,7 @@ async fn incrementally_configure_server() {
|
|||
// Check that the watch has been updated to reflect the above change and that this change _only_
|
||||
// applies to the correct port.
|
||||
let basic_config = InboundServer {
|
||||
name: "srv-0".into(),
|
||||
protocol: ProxyProtocol::Http1,
|
||||
authorizations: vec![healthcheck_authz(kubelet_ip)].into_iter().collect(),
|
||||
};
|
||||
|
@ -103,6 +102,7 @@ async fn incrementally_configure_server() {
|
|||
assert_eq!(
|
||||
time::timeout(time::Duration::from_secs(1), rx.next()).await,
|
||||
Ok(Some(InboundServer {
|
||||
name: "srv-0".into(),
|
||||
protocol: ProxyProtocol::Http1,
|
||||
authorizations: vec![
|
||||
(
|
||||
|
@ -150,13 +150,14 @@ fn server_update_deselects_pod() {
|
|||
(ips.next().unwrap(), ips.next().unwrap())
|
||||
};
|
||||
let detect_timeout = time::Duration::from_secs(1);
|
||||
let default = DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: true,
|
||||
};
|
||||
let (lookup_rx, mut idx) = Index::new(
|
||||
vec![cluster_net],
|
||||
"cluster.example.com".into(),
|
||||
DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: true,
|
||||
},
|
||||
default,
|
||||
detect_timeout,
|
||||
);
|
||||
|
||||
|
@ -182,6 +183,7 @@ fn server_update_deselects_pod() {
|
|||
assert_eq!(
|
||||
port2222.get(),
|
||||
InboundServer {
|
||||
name: "srv-0".into(),
|
||||
protocol: ProxyProtocol::Http2,
|
||||
authorizations: vec![healthcheck_authz(kubelet_ip)].into_iter().collect(),
|
||||
}
|
||||
|
@ -195,14 +197,8 @@ fn server_update_deselects_pod() {
|
|||
assert_eq!(
|
||||
port2222.get(),
|
||||
InboundServer {
|
||||
authorizations: mk_default_policy(
|
||||
DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: true,
|
||||
},
|
||||
cluster_net,
|
||||
kubelet_ip
|
||||
),
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(default, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
},
|
||||
|
@ -243,6 +239,7 @@ fn default_policy_global() {
|
|||
idx.reset_pods(vec![p]).unwrap();
|
||||
|
||||
let config = InboundServer {
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(*default, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
|
@ -300,6 +297,7 @@ fn default_policy_annotated() {
|
|||
idx.reset_pods(vec![p]).unwrap();
|
||||
|
||||
let config = InboundServer {
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(*default, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
|
@ -323,13 +321,14 @@ fn default_policy_annotated_invalid() {
|
|||
};
|
||||
let detect_timeout = time::Duration::from_secs(1);
|
||||
|
||||
let default = DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: false,
|
||||
};
|
||||
let (lookup_rx, mut idx) = Index::new(
|
||||
vec![cluster_net],
|
||||
"cluster.example.com".into(),
|
||||
DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
cluster_only: false,
|
||||
},
|
||||
default,
|
||||
detect_timeout,
|
||||
);
|
||||
|
||||
|
@ -353,6 +352,7 @@ fn default_policy_annotated_invalid() {
|
|||
assert_eq!(
|
||||
port2222.get(),
|
||||
InboundServer {
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(
|
||||
DefaultPolicy::Allow {
|
||||
authenticated_only: false,
|
||||
|
@ -400,6 +400,7 @@ fn opaque_annotated() {
|
|||
idx.reset_pods(vec![p]).unwrap();
|
||||
|
||||
let config = InboundServer {
|
||||
name: format!("default:{}", default),
|
||||
authorizations: mk_default_policy(*default, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Opaque,
|
||||
};
|
||||
|
@ -444,21 +445,21 @@ fn authenticated_annotated() {
|
|||
);
|
||||
idx.reset_pods(vec![p]).unwrap();
|
||||
|
||||
let config = InboundServer {
|
||||
authorizations: mk_default_policy(
|
||||
match *default {
|
||||
DefaultPolicy::Allow { cluster_only, .. } => DefaultPolicy::Allow {
|
||||
cluster_only,
|
||||
authenticated_only: true,
|
||||
},
|
||||
DefaultPolicy::Deny => DefaultPolicy::Deny,
|
||||
let config = {
|
||||
let policy = match *default {
|
||||
DefaultPolicy::Allow { cluster_only, .. } => DefaultPolicy::Allow {
|
||||
cluster_only,
|
||||
authenticated_only: true,
|
||||
},
|
||||
cluster_net,
|
||||
kubelet_ip,
|
||||
),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
},
|
||||
DefaultPolicy::Deny => DefaultPolicy::Deny,
|
||||
};
|
||||
InboundServer {
|
||||
name: format!("default:{}", policy),
|
||||
authorizations: mk_default_policy(policy, cluster_net, kubelet_ip),
|
||||
protocol: ProxyProtocol::Detect {
|
||||
timeout: detect_timeout,
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
let port2222 = lookup_rx
|
||||
|
|
Loading…
Reference in New Issue