diff --git a/wg-security-audit/README.md b/wg-security-audit/README.md
index e2018ad39..bbdd49249 100644
--- a/wg-security-audit/README.md
+++ b/wg-security-audit/README.md
@@ -29,9 +29,30 @@ Perform a security audit on k8s with a vendor and produce as artifacts a threat
* [Open Community Issues/PRs](https://github.com/kubernetes/community/labels/wg%2Fsecurity-audit)
+## Published Documents
+
+Trail of Bits and Atredis Partners, in collaboration with the Security Audit Working Group, have released the following documents, which
+detail their assessment of Kubernetes' security posture and their findings.
+
+### Findings
+
+* [Kubernetes Security Review](findings/Kubernetes%20Final%20Report.pdf)
+* [Attacking and Defending Kubernetes Installations](findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf)
+* [Whitepaper](findings/Kubernetes%20White%20Paper.pdf)
+* [Threat Model](findings/Kubernetes%20Threat%20Model.pdf)
+
+### Ancillary Data
+
+* [Rapid Risk Assessments](ancillary-data/rapid-risk-assessments)
+* [Dataflow](ancillary-data/dataflow)
+
+## Mailing Lists
+
+* Sensitive communications regarding the audit should be sent to the [private variant of the mailing list](https://groups.google.com/forum/#!forum/kubernetes-wg-security-audit-private).
+
## Request For Proposals
-
-The RFP will be open between 2018/10/29 and 2018/11/30 and has been published [here](https://github.com/kubernetes/community/blob/master/wg-security-audit/RFP.md).
+
+The RFP was open between 2018/10/29 and 2018/11/30 and is published [here](https://github.com/kubernetes/community/blob/master/wg-security-audit/RFP.md).
## Vendor Selection
@@ -39,8 +60,4 @@ The [RFP](https://github.com/kubernetes/community/blob/master/wg-security-audit/
You can read more about the vendor selection [here](RFP_Decision.md).
-## Mailing Lists
-
-* Sensitive communications regarding the audit should be sent to the [private variant of the mailing list](https://groups.google.com/forum/#!forum/kubernetes-wg-security-audit-private).
-
diff --git a/wg-security-audit/ancillary-data/dataflow/original dataflow.dot b/wg-security-audit/ancillary-data/dataflow/original dataflow.dot
new file mode 100644
index 000000000..02d2f830f
--- /dev/null
+++ b/wg-security-audit/ancillary-data/dataflow/original dataflow.dot
@@ -0,0 +1,47 @@
+digraph K8S {
+ subgraph cluster_apiserverinternal {
+ node [style=filled];
+ color=green;
+ etcd[label="etcd"];
+ label = "API Server Data Layer";
+ }
+
+ subgraph cluster_apiserver {
+ node [style=filled];
+ color=blue;
+ kubeapiserver[label="kube-apiserver"];
+ kubeapiserver->etcd[label="HTTPS"]
+ label = "API Server";
+ }
+
+ subgraph cluster_mastercomponents {
+ node [style=filled];
+ label = "Master Control Plane Components";
+ scheduler[label="Scheduler"];
+ controllers[label="Controllers"]
+ scheduler->kubeapiserver[label="Callback/HTTPS"];
+ controllers->kubeapiserver[label="Callback/HTTPS"];
+ color=black;
+ }
+
+ subgraph cluster_worker {
+ label="Worker"
+ color="blue"
+ kubelet->kubeapiserver[label="authenticated HTTPS"]
+ kubeproxy[label="kube-proxy"]
+ iptables->kubeproxy->iptables
+ pods[label="pods with various containers"]
+ pods->kubeproxy->pods
+ }
+
+ subgraph cluster_internet {
+ label="Internet"
+    authuser[label="Authorized User via kubectl"]
+ generaluser[label="General User"]
+ authuser->kubeapiserver[label="Authenticated HTTPS"]
+ generaluser->pods[label="application-specific connection protocol"]
+ }
+ kubeapiserver->kubelet[label="HTTPS"]
+ kubeapiserver->pods[label="HTTP",color=red]
+}
+
diff --git a/wg-security-audit/ancillary-data/dataflow/original dataflow.png b/wg-security-audit/ancillary-data/dataflow/original dataflow.png
new file mode 100644
index 000000000..62c6680e4
Binary files /dev/null and b/wg-security-audit/ancillary-data/dataflow/original dataflow.png differ
diff --git a/wg-security-audit/ancillary-data/dataflow/process.sh b/wg-security-audit/ancillary-data/dataflow/process.sh
new file mode 100644
index 000000000..0a446eb31
--- /dev/null
+++ b/wg-security-audit/ancillary-data/dataflow/process.sh
@@ -0,0 +1,3 @@
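+# Render the pytm threat model as a dataflow diagram: tm.py --dfd emits the
+# model as Graphviz DOT, dot converts it to PNG, and `open` previews the
+# result (macOS; substitute xdg-open or another image viewer on Linux).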
+python3 tm.py --dfd > updated-dataflow.dot
+dot -Tpng < updated-dataflow.dot > updated-dataflow.png
+open updated-dataflow.png
diff --git a/wg-security-audit/ancillary-data/dataflow/requirements.txt b/wg-security-audit/ancillary-data/dataflow/requirements.txt
new file mode 100644
index 000000000..f65609d4c
--- /dev/null
+++ b/wg-security-audit/ancillary-data/dataflow/requirements.txt
@@ -0,0 +1 @@
+pytm==0.4
diff --git a/wg-security-audit/ancillary-data/dataflow/tm.py b/wg-security-audit/ancillary-data/dataflow/tm.py
new file mode 100644
index 000000000..245501ff5
--- /dev/null
+++ b/wg-security-audit/ancillary-data/dataflow/tm.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+from pytm.pytm import TM, Server, Datastore, Dataflow, Boundary, Actor, Lambda, Process
+
+tm = TM("Kubernetes Threat Model")
+tm.description = "a deep-dive threat model of Kubernetes"
+
+# Boundaries
+
+inet = Boundary("Internet")
+mcdata = Boundary("Master Control Data")
+apisrv = Boundary("API Server")
+mcomps = Boundary("Master Control Components")
+worker = Boundary("Worker")
+contain = Boundary("Container")
+
+# Actors
+
+miu = Actor("Malicious Internal User")
+ia = Actor("Internal Attacker")
+ea = Actor("External Actor")
+admin = Actor("Administrator")
+dev = Actor("Developer")
+eu = Actor("End User")
+
+# Server & OS Components
+
+etcd = Datastore("N-ary etcd servers")
+apiserver = Server("kube-apiserver")
+kubelet = Server("kubelet")
+kubeproxy = Server("kube-proxy")
+scheduler = Server("kube-scheduler")
+controllers = Server("CCM/KCM")
+pods = Server("Pods")
+iptables = Process("iptables")
+
+# Component <> Boundary Relations
+etcd.inBoundary = mcdata
+mcdata.inBoundary = apisrv
+apiserver.inBoundary = apisrv
+kubelet.inBoundary = worker
+kubeproxy.inBoundary = worker
+pods.inBoundary = contain
+scheduler.inBoundary = mcomps
+controllers.inBoundary = mcomps
+iptables.inBoundary = worker
+miu.inBoundary = apisrv
+ia.inBoundary = contain
+ea.inBoundary = inet
+admin.inBoundary = apisrv
+dev.inBoundary = inet
+eu.inBoundary = inet
+
+# Dataflows
+
+apiserver2etcd = Dataflow(apiserver, etcd, "All kube-apiserver data")
+apiserver2etcd.isEncrypted = True
+apiserver2etcd.protocol = "HTTPS"
+
+apiserver2kubelet = Dataflow(apiserver, kubelet, "kubelet Health, Status, &c.")
+apiserver2kubelet.isEncrypted = False
+apiserver2kubelet.protocol = "HTTP"
+
+apiserver2kubeproxy = Dataflow(apiserver, kubeproxy, "kube-proxy Health, Status, &c.")
+apiserver2kubeproxy.isEncrypted = False
+apiserver2kubeproxy.protocol = "HTTP"
+
+apiserver2scheduler = Dataflow(apiserver, scheduler, "kube-scheduler Health, Status, &c.")
+apiserver2scheduler.isEncrypted = False
+apiserver2scheduler.protocol = "HTTP"
+
+apiserver2controllers = Dataflow(apiserver, controllers, "{kube, cloud}-controller-manager Health, Status, &c.")
+apiserver2controllers.isEncrypted = False
+apiserver2controllers.protocol = "HTTP"
+
+kubelet2apiserver = Dataflow(kubelet, apiserver, "HTTP watch for resources on kube-apiserver")
+kubelet2apiserver.isEncrypted = True
+kubelet2apiserver.protocol = "HTTPS"
+
+kubeproxy2apiserver = Dataflow(kubeproxy, apiserver, "HTTP watch for resources on kube-apiserver")
+kubeproxy2apiserver.isEncrypted = True
+kubeproxy2apiserver.protocol = "HTTPS"
+
+controllers2apiserver = Dataflow(controllers, apiserver, "HTTP watch for resources on kube-apiserver")
+controllers2apiserver.isEncrypted = True
+controllers2apiserver.protocol = "HTTPS"
+
+scheduler2apiserver = Dataflow(scheduler, apiserver, "HTTP watch for resources on kube-apiserver")
+scheduler2apiserver.isEncrypted = True
+scheduler2apiserver.protocol = "HTTPS"
+
+kubelet2iptables = Dataflow(kubelet, iptables, "kubenet update of iptables (... ipvs, &c) to setup Host-level ports")
+kubelet2iptables.protocol = "IPC"
+
+kubeproxy2iptables = Dataflow(kubeproxy, iptables, "kube-proxy update of iptables (... ipvs, &c) to setup all pod networking")
+kubeproxy2iptables.protocol = "IPC"
+
+kubelet2pods = Dataflow(kubelet, pods, "kubelet to pod/CRI runtime, to spin up pods within a host")
+kubelet2pods.protocol = "IPC"
+
+eu2pods = Dataflow(eu, pods, "End-user access of Kubernetes-hosted applications")
+ea2pods = Dataflow(ea, pods, "External Attacker attempting to compromise a trust boundary")
+ia2cnts = Dataflow(ia, pods, "Internal Attacker with access to a compromised or malicious pod")
+
+tm.process()
diff --git a/wg-security-audit/ancillary-data/dataflow/updated-dataflow.dot b/wg-security-audit/ancillary-data/dataflow/updated-dataflow.dot
new file mode 100644
index 000000000..671e2dde0
--- /dev/null
+++ b/wg-security-audit/ancillary-data/dataflow/updated-dataflow.dot
@@ -0,0 +1,217 @@
+digraph tm {
+ graph [
+ fontname = Arial;
+ fontsize = 14;
+ ]
+ node [
+ fontname = Arial;
+ fontsize = 14;
+ rankdir = lr;
+ ]
+ edge [
+ shape = none;
+ fontname = Arial;
+ fontsize = 12;
+ ]
+ labelloc = "t";
+ fontsize = 20;
+ nodesep = 1;
+
+subgraph cluster_bfaefefcfbeeafeefac {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <Internet>;
+ ]
+
+bfbeacdafaceebdccfdffcdfcedfec [
+ shape = square;
+ label = <
>;
+]
+abaadcacbbafdffbcffffbeedef [
+ shape = square;
+ label = <>;
+]
+adafdaeaedeedcafe [
+ shape = square;
+ label = <>;
+]
+
+}
+
+subgraph cluster_bbfdadaacbdaedcebfec {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <Master Control Data>;
+ ]
+
+bfffcaeeeeedccabfaaeff [
+ shape = none;
+ color = black;
+ label = <>;
+]
+
+}
+
+subgraph cluster_afeffbbfdbeeefcabddacdba {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <API Server>;
+ ]
+
+bdfbefabdbefeacdfcabaac [
+ shape = square;
+ label = <>;
+]
+fabeebdadbcdffdcdec [
+ shape = square;
+ label = <>;
+]
+eadddadcfbabebaed [
+ shape = circle
+ color = black
+ label = <>;
+]
+
+}
+
+subgraph cluster_cebcbebffccbfedcaffbb {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <Master Control Components>;
+ ]
+
+ffceacecdbcacdddddffbfa [
+ shape = circle
+ color = black
+ label = <>;
+]
+adffdceecfcfbcfdaefca [
+ shape = circle
+ color = black
+ label = <>;
+]
+
+}
+
+subgraph cluster_baaffdafbdceebaaafaefeea {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <Worker>;
+ ]
+
+dbddcfaeaacebaecba [
+ shape = circle
+ color = black
+ label = <>;
+]
+ddcaffdfdebdaeff [
+ shape = circle
+ color = black
+ label = <>;
+]
+bcdcebabbdaadffeaeddcce [
+ shape = circle;
+ color = black;
+
+ label = <>;
+]
+
+}
+
+subgraph cluster_fdcecbcfbeadaccab {
+ graph [
+ fontsize = 10;
+ fontcolor = firebrick2;
+ style = dashed;
+ color = firebrick2;
+ label = <Container>;
+ ]
+
+bdfadfbeeaedceab [
+ shape = square;
+ label = <>;
+]
+eefbffbeaaeecaceaaabe [
+ shape = circle
+ color = black
+ label = <>;
+]
+
+}
+
+ eadddadcfbabebaed -> bfffcaeeeeedccabfaaeff [
+ color = black;
+ label = <>;
+ ]
+ eadddadcfbabebaed -> dbddcfaeaacebaecba [
+ color = black;
+ label = <kubelet Health, Status, &c. |
>;
+ ]
+ eadddadcfbabebaed -> ddcaffdfdebdaeff [
+ color = black;
+ label = <kube-proxy Health, Status, &c. |
>;
+ ]
+ eadddadcfbabebaed -> ffceacecdbcacdddddffbfa [
+ color = black;
+ label = <kube-scheduler Health, Status, &c. |
>;
+ ]
+ eadddadcfbabebaed -> adffdceecfcfbcfdaefca [
+ color = black;
+ label = <{kube, cloud}-controller-manager Health, Status, &c. |
>;
+ ]
+ dbddcfaeaacebaecba -> eadddadcfbabebaed [
+ color = black;
+ label = <HTTP watch for resources on kube-apiserver |
>;
+ ]
+ ddcaffdfdebdaeff -> eadddadcfbabebaed [
+ color = black;
+ label = <HTTP watch for resources on kube-apiserver |
>;
+ ]
+ adffdceecfcfbcfdaefca -> eadddadcfbabebaed [
+ color = black;
+ label = <HTTP watch for resources on kube-apiserver |
>;
+ ]
+ ffceacecdbcacdddddffbfa -> eadddadcfbabebaed [
+ color = black;
+ label = <HTTP watch for resources on kube-apiserver |
>;
+ ]
+ dbddcfaeaacebaecba -> bcdcebabbdaadffeaeddcce [
+ color = black;
+ label = <kubenet update of iptables (... ipvs, &c) to setup Host-level ports |
>;
+ ]
+ ddcaffdfdebdaeff -> bcdcebabbdaadffeaeddcce [
+ color = black;
+    label = <kube-proxy update of iptables (... ipvs, &c) to setup all pod networking |
>;
+ ]
+ dbddcfaeaacebaecba -> eefbffbeaaeecaceaaabe [
+ color = black;
+ label = <kubelet to pod/CRI runtime, to spin up pods within a host |
>;
+ ]
+ adafdaeaedeedcafe -> eefbffbeaaeecaceaaabe [
+ color = black;
+ label = <End-user access of Kubernetes-hosted applications |
>;
+ ]
+ bfbeacdafaceebdccfdffcdfcedfec -> eefbffbeaaeecaceaaabe [
+ color = black;
+ label = <External Attacker attempting to compromise a trust boundary |
>;
+ ]
+ bdfadfbeeaedceab -> eefbffbeaaeecaceaaabe [
+ color = black;
+ label = <Internal Attacker with access to a compromised or malicious pod |
>;
+ ]
+}
diff --git a/wg-security-audit/ancillary-data/dataflow/updated-dataflow.png b/wg-security-audit/ancillary-data/dataflow/updated-dataflow.png
new file mode 100644
index 000000000..c86cd09e1
Binary files /dev/null and b/wg-security-audit/ancillary-data/dataflow/updated-dataflow.png differ
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/container-runtime.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/container-runtime.md
new file mode 100644
index 000000000..98130bf17
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/container-runtime.md
@@ -0,0 +1,141 @@
+# Overview
+
+- Component: Container Runtime
+- Owner(s): [sig-node](https://github.com/kubernetes/community/blob/master/sig-node/README.md)
+- SIG/WG(s) at meeting:
+- Service Data Classification: High
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- Container Runtimes expose an IPC endpoint such as a file system socket
+- kubelet retrieves pods to be executed from the kube-apiserver
+- The Container Runtime Interface then executes the necessary commands/requests from the actual container system (e.g. docker) to run the pod
+
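+As a rough illustration of the flow above, the CRI endpoint can be exercised directly with `crictl`; a minimal sketch, assuming containerd's default socket path (the actual path is runtime-specific):
+
+```sh
+# Point crictl at the runtime's CRI socket (containerd's default shown here).
+export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
+
+# List the pod sandboxes and containers the runtime is currently managing,
+# the same objects kubelet asks the CRI to create from pod specs.
+crictl pods
+crictl ps
+
+# Inspect runtime status and configuration as reported over the CRI.
+crictl info
+```
+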
+## Are there any subcomponents or shared boundaries?
+
+Yes
+
+- The Container Runtime technically interfaces with kubelet, and runs on the same host
+- However, the Container Runtime is logically a separate Trust Zone within the node
+
+## What communications protocols does it use?
+
+Various, depends on the IPC mechanism required by the Container Runtime
+
+## Where does it store data?
+
+Most data should be provided by kubelet or the CRI in running the container
+
+## What is the most sensitive data it stores?
+
+N/A
+
+## How is that data stored?
+
+N/A
+
+# Meeting Notes
+
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+- CRI Runs an HTTP server
+ - port forwarding, exec, attach
+- !FINDING TLS by default, but not mutual TLS, and self-signed
+ - kubelet -> exec request to CRI over gRPC
+ - Returns URL with single use Token
+ - gRPC is Unix Domain by default
+- Kubelet proxies or responds w/ redirect to API server (locally hosted CRI only)
+- !FINDING(same HTTP finding for pull as kubectl) CRI actually pulls images, no egress filtering
+ - image tag is SHA256, CRI checks that
+- Not sure how CNI is invoked; it might be exec
+- only responds to connections
+- CRI uses Standard Go HTTP
+
+## Cryptography
+
+- Nothing beyond TLS
+
+## Secrets Management
+
+- !FINDING auth'd container repos, passed in via podspec, fetched by kubelet, are passed via CLI
+ - so anyone with access to the host running the container can see those secrets
+
+## Authentication
+
+- Unix Domain Socket for gRPC, so Linux authN/authZ
+- !FINDING 8-character random single-use token with a 1-minute lifetime (response to line 109)
+
+## Authorization
+
+- no authZ
+
+## Multi-tenancy Isolation
+
+- knows nothing about tenants or namespaces
+- low-level component, kubelet/api-server is the arbiter
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/etcd.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/etcd.md
new file mode 100644
index 000000000..bbba4fff5
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/etcd.md
@@ -0,0 +1,162 @@
+# Overview
+
+- Component: etcd
+- Owner(s): Technically external to Kubernetes itself, but managed by [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery)
+- SIG/WG(s) at meeting:
+- Service Data Classification: Critical (on a cluster with an API server, access to etcd is root access to the cluster)
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- Distributed key-value store
+- uses RAFT for consensus
+  - always deploy an odd number of members (2N + 1) to avoid leader-election issues
+ - five is recommended for production usage
+- listens for requests from clients
+- clients are simple REST clients that interact via JSON or other mechanisms
+- in Kubernetes' case, data is stored under `/registry`
+
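+A quick way to see this layout (and why access to etcd is so sensitive) is to query the keyspace directly with `etcdctl`; a minimal sketch, assuming a local member and the API server's client certificate (paths shown are illustrative kubeadm-style defaults):
+
+```sh
+# Dump the Kubernetes keyspace; every API object lives under /registry, and
+# Secrets sit at /registry/secrets/<namespace>/<name>, unencrypted unless an
+# encryption provider is configured on the kube-apiserver.
+ETCDCTL_API=3 etcdctl \
+  --endpoints=https://127.0.0.1:2379 \
+  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
+  --cert=/etc/kubernetes/pki/apiserver-etcd-client.crt \
+  --key=/etc/kubernetes/pki/apiserver-etcd-client.key \
+  get /registry --prefix --keys-only | head -n 20
+```
+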
+## Are there any subcomponents or shared boundaries?
+
+There shouldn't be; documentation specifically states:
+
+- should be in own cluster
+- limited to access by the API server(s) only
+- should use some sort of authentication (hopefully certificate auth)
+
+## What communications protocols does it use?
+
+- HTTPS (with optional client-side or two-way TLS)
+ - can also use basic auth
+- there's technically gRPC as well
+
+## Where does it store data?
+
+- typical database-style:
+ - data directory
+ - snapshot directory
+ - write-ahead log (WAL) directory
+- all three may be the same, depends on command line options
+- Consensus is then achieved across nodes via RAFT (leader election + log replication via distributed state machine)
+
+## What is the most sensitive data it stores?
+
+- literally holds the keys to the kingdom:
+ - pod specs
+ - secrets
+ - roles/attributes for {R, A}BAC
+ - literally any data stored in Kubernetes via the kube-apiserver
+- [Access to etcd is equivalent to root permission in the cluster](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters)
+
+## How is that data stored?
+
+- Outside the scope of this assessment per se, but not encrypted at rest
+- Kubernetes supports this itself with Encryption providers
+- the typical process of a WAL + data + snapshot is used
+- this is then replicated across the cluster with Raft
+
+# Meeting Notes
+
+- No authorization (from k8s perspective)
+- Authentication by local port access in current k8s
+ - working towards mTLS for all connections
+- Raft consensus port, listener port
+- backups in etcd (system-level) not encrypted
+- metrics aren't encrypted at all either
+- multi-tenant: no multi-tenant controls at all
+  - the kube-apiserver is the arbiter of namespaces
+ - could add namespaces to the registry, but that is a large amount of work
+ - no migration plan or test
+ - watches (like kubelet watching for pod spec changes) would break
+ - multi-single tenant is best route
+- RAFT port may be open by default, even in single etcd configurations
+- runs in a container (a static pod managed by the master's kubelet), but is run as root
+- [CONTROL WEAKNESS] CA is passed on command line
+- Types of files: WAL, Snapshot, Data file (and maybe backup)
+ - [FINDING] no checksums on WAL/Snapshot/Data
+ - [RECOMMENDATION] checksum individual WAL entries, checksum the entire snapshot file
+ - do this because it's fast enough for individual entries, and then the snapshot should never change
+- Crypto, really only TLS (std go) and checksums for backups (but not other files, as noted above)
+- No auditing, but that's less useful
+ - kube-apiserver is the arbiter of what things are
+ - kube-apiserver uses a single connection credential to etcd w/o impersonation, so harder to tell who did what
+ - major events end up in the app log
+ - debug mode allows you to see all events when they happen
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+## Cryptography
+
+## Secrets Management
+
+## Authentication
+
+- by default Kubernetes doesn't use two-way TLS to the etcd cluster, which would be the most secure (combined with IP restrictions so that stolen creds can't be reused on new infrastructure)
+
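+A sketch of what enabling two-way TLS looks like on both sides; the flag names are the standard etcd and kube-apiserver flags, while certificate paths are illustrative:
+
+```sh
+# etcd: serve TLS and require client certificates signed by a dedicated etcd CA
+# (other etcd flags omitted).
+etcd \
+  --cert-file=/etc/etcd/pki/server.crt \
+  --key-file=/etc/etcd/pki/server.key \
+  --client-cert-auth=true \
+  --trusted-ca-file=/etc/etcd/pki/ca.crt
+
+# kube-apiserver: present a client certificate when connecting to etcd.
+kube-apiserver \
+  --etcd-servers=https://127.0.0.1:2379 \
+  --etcd-cafile=/etc/etcd/pki/ca.crt \
+  --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt \
+  --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
+```
+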
+## Authorization
+
+## Multi-tenancy Isolation
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/kcm-ccm-notes.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/kcm-ccm-notes.md
new file mode 100644
index 000000000..b6bfbb64c
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/kcm-ccm-notes.md
@@ -0,0 +1,18 @@
+# Meeting notes
+
+- CCM per cloud provider
+- same host as kube-apiserver
+- caches live in memory
+- refresh cache, but can be forced to by request
+- Controller manager attempts to use PoLA, but the service account controller has permission to write to its own policies
+- Cloud controller (routes, IPAM, &c.) can talk to external resources
+- CCM/KCM have no notion of multi-tenant, and there are implications going forward
+- Deployments across namespace
+- cloud controller has access to cloud credentials (passed in by various means, as we saw in the code)
+- CCM is a reference implementation, meant to separate out other companies' code
+  - So Amazon doesn't need to have Red Hat's code running, &c.
+- shared cache across all controllers
+- [FINDING] separate out high privileged controllers from lower privileged ones, so there's no confused deputy
+ - single binary for controller
+ - if you can trick the service account controller into granting access to things you shouldn't (for example) that would be problematic
+ - make a "privileged controller manager" which bundles high and low-privileged controllers, and adds another trust boundary
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-apiserver.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-apiserver.md
new file mode 100644
index 000000000..0fb851d80
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-apiserver.md
@@ -0,0 +1,187 @@
+# Overview
+
+- Component: kube-apiserver
+- Owner(s): [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery)
+- SIG/WG(s) at meeting:
+- Service Data Classification: Critical (technically, it isn't needed, but most clusters will use it extensively)
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- RESTful API server
+- made up of multiple subcomponents:
+ - authenticators
+ - authorizers
+ - admission controllers
+ - resource validators
+- users issue a request, which is authenticated via one (or more) plugins
+- the request is then authorized by one or more authorizers
+- it is then potentially modified and validated by an admission controller
+- resource validation then validates the object, which is stored in etcd before the server responds
+- clients issue HTTP requests (via TLS ala HTTPS) to "watch" resources and poll for changes from the server; for example:
+ 1. a client updates a pod definition via `kubectl` and a `POST` request
+ 1. the scheduler is "watching" for pod updates via an HTTP watch request to retrieve new pods
+ 1. the scheduler then updates the pod list via a `POST` to the kube-apiserver
+ 1. a node's `kubelet` retrieves a list of pods assigned to it via an HTTP watch request
+ 1. the node's `kubelet` then updates the running pod list on the kube-apiserver
+
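+The watch pattern in the sequence above can be reproduced by hand against the REST API; a minimal sketch, assuming a bearer token permitted to list pods (address, token, and CA paths are illustrative):
+
+```sh
+APISERVER=https://127.0.0.1:6443
+TOKEN=$(cat /path/to/token)
+
+# A long-lived HTTP watch: the kube-apiserver streams one JSON event per line
+# (ADDED/MODIFIED/DELETED) as pod objects change.
+curl --cacert /path/to/ca.crt \
+  -H "Authorization: Bearer ${TOKEN}" \
+  "${APISERVER}/api/v1/pods?watch=true"
+```
+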
+## Are there any subcomponents or shared boundaries?
+
+Yes
+
+- Controllers technically run on the kube-apiserver
+- the various subcomponents (authenticators, authorizers, and so on) run on the kube-apiserver
+
+Additionally, depending on the configuration, there may be any number of other Master Control Plane components running on the same physical/logical host.
+
+## What communications protocols does it use?
+
+- Communications to the kube-apiserver use HTTPS and various authentication mechanisms
+- Communications from the kube-apiserver to etcd use HTTPS, with optional client-side (two-way) TLS
+- Communications from the kube-apiserver to kubelets can use HTTP or HTTPS; the latter is without certificate validation by default (find this again in the docs)
+
+## Where does it store data?
+
+- Most data is stored in etcd, mainly under `/registry`
+- Some data is obviously stored on the local host, to bootstrap the connection to etcd
+
+## What is the most sensitive data it stores?
+
+- Not much sensitive data is stored directly on the kube-apiserver
+- However, all sensitive data within the system (save for in MCP-less setups) is processed and transacted via the kube-apiserver
+
+## How is that data stored?
+
+- On etcd, with the level of protection requested by the user
+- looks like encryption [is a command line flag](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#configuration-and-determining-whether-encryption-at-rest-is-already-enabled)
+
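+A sketch of turning that on; the flag is the real kube-apiserver flag, while the config path and its EncryptionConfiguration contents are deployment-specific:
+
+```sh
+# Encrypt selected resources (typically Secrets) at rest in etcd by pointing
+# the kube-apiserver at an EncryptionConfiguration file.
+kube-apiserver \
+  --encryption-provider-config=/etc/kubernetes/encryption-config.yaml
+  # (remaining kube-apiserver flags unchanged)
+```
+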
+# Meeting notes
+
+- webhooks: kube-apiserver can call external resources
+ - authorization webhook (for when you wish to auth a request without setting up a new authorizer)
+ - images, other resources
+ - [FINDING] supports HTTP
+- Aggregate API server // Aggregator
+  - for adding extensibility resources
+ - a type of CRD, basically
+- component status -> reaches out to every component on the cluster
+- Network proxy: restrict outbound connections from kube-apiserver (currently no restriction)
+ - honestly a weakness: no egress filtering
+- Business logic in controllers, but kube-apiserver is info
+- cloud providers, auth, &c
+- sharding by group/version/kind; put all GVKs into the same etcd
+- listeners: insecure and secure
+ - check if insecure is configured by default
+ - would be a finding if so
+- Not comfortable doing true multi-tenant on k8s
+- multi-single tenants (as in, if Pepsi wants to have marketing & accounting that's fine, but not Coke & Pepsi on the same cluster)
+- Best way to restrict access to kube-apiserver
+ - and working on a proxy as noted above
+- kube-apiserver is the root CA for *at least two* PKIs:
+ - two CAs, but not on by default w/o flags (check what happens w/o two CAs...)
+ - that would be a finding, if you can cross CAs really
+- TLS (multiple domains):
+ - etcd -> kube-apiserver
+  - the other is webhooks/kubelet/components...
+- check secrets: can you tell k8s to encrypt a secret but not provide the flag? what does it do?
+- Alt route for secrets: volumes, write to a volume, then mount
+ - Can't really do much about that, since it's opaque to the kube-apiserver
+- ConfigMap: people can stuff secrets into ConfigMaps
+ - untyped data blob
+ - cannot encrypt
+ - recommend moving away from ConfigMaps
+- Logging to var log
+ - resource names in logs (namespace, secret name, &c). Can be sensitive
+  - [FINDING] no logs by default of who did what
+ - need to turn on auditing for that
+ - look at metrics as well, similar to CRDs
+- Data Validation
+ - can have admission controller, webhooks, &c.
+ - everything goes through validation
+- Session
+ - upgrade to HTTP/2, channel, or SPDY
+ - JWT is long lived (we know)
+ - Certain requests like proxy and logs require upgrade to channels
+ - look at k8s enhancement ... kube-apiserver dot md
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+- in the version of k8s we are testing, no outbound limits on external connections
+
+## Cryptography
+
+- Not encrypting secrets in etcd by default
+- requiring [a command line flag](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#configuration-and-determining-whether-encryption-at-rest-is-already-enabled)
+- Supports HTTP for webhooks and component status
+
+## Secrets Management
+
+## Authentication
+
+## Authorization
+
+## Multi-tenancy Isolation
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-proxy.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-proxy.md
new file mode 100644
index 000000000..521d0e839
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-proxy.md
@@ -0,0 +1,227 @@
+# Overview
+
+- Component: kube-proxy
+- Owner(s): [sig-network](https://github.com/kubernetes/community/tree/master/sig-network)
+- SIG/WG(s) at meeting:
+- Service Data Classification: Medium
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- kubeproxy has several main modes of operation:
+ - as a literal network proxy, handling networking between nodes
+ - as a bridge between Container Network Interface (CNI) which handles the actual networking and the host operating system
+ - `iptables` mode
+ - `ipvs` mode
+ - two Microsoft Windows-specific modes (not covered by the RRA)
+- in any of these modes, kubeproxy interfaces with the host's routing table so as to achieve a seamless, flat network across the kubernetes cluster
+
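+The mode is selected with a single flag; a minimal sketch (the flag is real, the kubeconfig path is illustrative):
+
+```sh
+# Run kube-proxy in iptables mode (the common default); "ipvs" and the mostly
+# deprecated "userspace" mode are selected the same way.
+kube-proxy \
+  --kubeconfig=/var/lib/kube-proxy/kubeconfig \
+  --proxy-mode=iptables
+```
+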
+## Are there any subcomponents or shared boundaries?
+
+Yes.
+
+- Similar to kubelet, kube-proxy runs on the node, with an implicit trust boundary between Worker components and Container components (i.e. pods)
+
+## What communications protocols does it use?
+
+- Direct IPC to `iptables` or `ipvs`
+- HTTPS to the kube-apiserver
+- HTTP Healthz port (which is a literal counter plus a `200 Ok` response)
+
+## Where does it store data?
+
+Minimal data should be stored by kube-proxy itself; data handling is mainly done by kubelet and some file-system configuration
+
+## What is the most sensitive data it stores?
+
+N/A
+
+## How is that data stored?
+
+N/A
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+- kube-proxy is actually five programs
+- proxy: mostly deprecated, but a literal proxy, in that it intercepts requests and proxies them to backend services
+- IPVS/iptables: very similar modes, handle connecting virtual IPs (VIPs) and the like via low-level routing (the preferred mode)
+- two Windows-specific modes (out of scope for this discussion, but if there are details we can certainly add them)
+
+Node ports:
+
+- captures traffic from Host IP
+- shuffles to backend (used for building load balancers)
+
+- kube-proxy shells out to `iptables` or `ipvs`
+- Also uses a netlink socket for IPVS (netlink are similar to Unix Domain Sockets)
+- *Also* shells out to `ipset` under certain circumstances for IPVS (building sets of IPs and such)
+
+
+### User space proxy
+
+Setup:
+
+1. Connect to the kube-apiserver
+1. Watch the API server for services/endpoints/&c
+1. Build in-memory caching map: for services, for every port a service maps, open a port, write iptables rule for VIP & Virt Port
+1. Watch for updates of services/endpoints/&c
+
+when a consumer connects to the port:
+
+1. Service is running VIP:VPort
+1. Root NS -> iptable -> kube-proxy port
+1. look at the src/dst port, check the map, pick a service on that port at random (if that fails, try another until either success or a retry count has exceeded)
+1. Shuffle bytes back and forth between backend service and client until termination or failure
+
+### iptables
+
+1. Same initial setup (sans opening a port directly)
+1. iptables restore command set
+1. giant string of services
+1. User VIP -> Random Backend -> Rewrite packets (at the kernel level, so kube-proxy never sees the data)
+1. At the end of the sync loop, write (write in batches to avoid iptables contentions)
+1. no more routing table touches until service updates (from watching kube-apiserver or a time out, expanded below)
+
+**NOTE**: rate limited (bounded frequency) updates:
+- no later than 10 minutes by default
+- no sooner than 15s by default (if there are no service map updates)
+
+this point came out of the following question: is having access to kube-proxy *worse* than having root access to the host machine?
+
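+The output of that sync loop is visible on any node; a sketch of inspecting the generated NAT rules (the KUBE-* chain names are what current kube-proxy versions write):
+
+```sh
+# Service virtual IPs land in the KUBE-SERVICES chain of the nat table;
+# per-endpoint DNAT rules land in KUBE-SEP-* chains.
+sudo iptables -t nat -L KUBE-SERVICES -n | head -n 20
+sudo iptables-save -t nat | grep -c '^-A KUBE-SEP'
+```
+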
+### ipvs
+
+1. Same setup as iptables & proxy mode
+1. `ipvsadm` and `ipset` commands instead of `iptables`
+1. This does have some strange changes:
+ - ip address needs a dummy adapter
+  - !NOTE Any service bound to 0.0.0.0 is also bound to _all_ adapters
+ - somewhat expected because 0.0.0.0, but can still lead to interesting behavior
+
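+A sketch of what that looks like on an IPVS-mode node; the dummy interface and tool names below are the ones kube-proxy uses today:
+
+```sh
+# kube-proxy binds every service VIP to a dummy interface (kube-ipvs0) and
+# programs the virtual servers with ipvsadm/ipset.
+ip addr show kube-ipvs0
+sudo ipvsadm -Ln              # list virtual services and their real servers
+sudo ipset list | head -n 40  # sets referenced by the generated iptables rules
+```
+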
+### concern points within networking
+
+- !NOTE: ARP table attacks (such as if someone has `CAP_NET_RAW` in a container or host access) can impact kube-proxy
+- Endpoint selection is namespace & pod-based, so injection could overwrite (I don't think this is worth a finding/note because kube-apiserver is the arbiter of truth)
+- !FINDING (but low...): Pod IP reuse: (factor of 2 x max) by causing a machine to churn through IPs, you could cause kube-proxy to forward ports to your pod if you win the race condition.
+ - this would be limited to the window of routing updates
+ - however, established connections would remain
+ - kube-apiserver could be the arbiter of routing, but that may require more watch and connection to the central component
+ - [editor] I think just noting this potential issue and maybe warning on it in kube-proxy logs would be enough
+
+### with root access?
+
+Access to kube-proxy is mostly the same as root access
+
+- set syscalls, route local, &c could gobble memory
+- Node/VIP level
+- Recommend `CAP_NET_BIND_SERVICE` (bind to low ports, don't need root for certain users) for containers/pods, to alleviate concerns there
+- Can map low ports to high ports in kube-proxy as well, but mucks with anything that pretends to be a VIP
+ - LB forwards packets to service without new connection (based on srcport)
+ - 2-hop LB, can't do direct LB
+
+## Cryptography
+
+- kube-proxy itself does not handle cryptography other than the TLS connection to kube-apiserver
+
+## Secrets Management
+
+- kube-proxy itself does not handle secrets, but rather only consumes credentials from the command line (like all other k8s components)
+
+## Authentication
+
+- kube-proxy does not handle any authentication other than credentials to the kube-apiserver
+
+## Authorization
+
+- kube-proxy does not handle any authorization; the arbiters of authorization are the kubelet and kube-apiserver
+
+## Multi-tenancy Isolation
+
+- kube-proxy does not currently segment clients from one another, as clients on the same pod/host must use the same iptables/ipvs configuration
+- kube-proxy does have a conception of namespaces, but currently avoids enforcing much at that level
+ - routes still must be added to iptables or the like
+ - iptables contention could be problematic
+  - much better to handle at higher-level components, namely the kube-apiserver and kubelet
+
+## Logging
+
+- stderr directed to a file
+- same as with kubelet
+- !FINDING (but same as all other components) logs namespaces, service names (same as every other service)
+
+# Additional Notes
+
+## kubelet to iptables
+
+- per pod network management
+- pods can request a host port, docker style
+- kubenet and CNI plugins
+- kubenet uses CNI
+- setup kubenet iptable to map ports to a single pod
+- overly broad, should be appended to iptables list
+- all local IPs to the host
+
+!FINDING: don't use host ports, as they can cause problems with services and such; we may recommend deprecating them
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-scheduler.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-scheduler.md
new file mode 100644
index 000000000..5628f8d16
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/kube-scheduler.md
@@ -0,0 +1,162 @@
+# Overview
+
+- Component: kube-scheduler
+- Owner(s): [sig-scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling)
+- SIG/WG(s) at meeting:
+- Service Data Classification: Moderate (the scheduler adds pods to nodes, but will not remove pods, for the most part)
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- Similar to most other components:
+ 1. Watches for unscheduled/new pods
+  1. Watches nodes and their resource constraints
+ 1. Chooses a node, via various mechanisms, to allocate based on best fit of resource requirements
+ 1. Updates the pod spec on the kube-apiserver
+ 1. that update is then retrieved by the node, which is also Watching components via the kube-apiserver
+- there may be multiple schedulers with various names, and parameters (such as pod-specific schedulers)
+
+- !NOTE schedulers are coöperative
+- !NOTE schedulers are *supposed* to honor the name, but need not
+  - Interesting note: this makes the huge-list-of-schedulers DoS interesting
+  - !NOTE the idea there was to add a *huge* number of pods to be scheduled that are associated with a poorly named scheduler
+  - !NOTE people shouldn't request specific schedulers in the podspec; rather, there should be some webhook to process that
+  - !NOTE the team wasn't sure what would happen with a large number of pods to be scheduled
+
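+The scheduler a pod asked for is plain data in its spec, which is also the knob the notes above worry about; a minimal sketch of inspecting it with `kubectl`:
+
+```sh
+# Show which scheduler each pod requested (default-scheduler for most pods).
+kubectl get pods -A -o custom-columns=NAME:.metadata.name,SCHEDULER:.spec.schedulerName
+
+# Pods that name a scheduler nobody runs simply sit in Pending.
+kubectl get pods -A --field-selector=status.phase=Pending
+```
+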
+## Are there any subcomponents or shared boundaries?
+
+Yes
+
+- there may be multiple schedulers on the same MCP host
+- schedulers may run on the same host as the API server
+
+## What communications protocols does it use?
+
+- standard HTTPS + auth (chosen by the cluster)
+
+## Where does it store data?
+
+- most should be stored in etcd (via kube-apiserver)
+- some data will be stored on command line (configuration options) or on the file system (certificate paths for authentication)
+
+## What is the most sensitive data it stores?
+
+- No direct storage
+
+## How is that data stored?
+
+- N/A
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+- only talks to kube-apiserver
+- colocated on the same host generally as kube-apiserver, but needn't be
+- has a web server (HTTP)
+ - !FINDING: same HTTP server finding as all other components
+ - metrics endpoint: qps, scheduling latency, &c
+ - healthz endpoint, which is just a 200 Ok response
+ - by default doesn't verify cert (maybe)
+
+## Cryptography
+
+- None
+
+## Secrets Management
+
+- Logs are the only persistence mechanism
+- !FINDING (to be added to all the other "you expose secrets in env and CLI" finding locations) auth token/cred passed in via CLI
+
+## Authentication
+
+- no authN really
+- pods, nodes, related objects; doesn't deal in authN
+- unaware of any service/user accounts
+
+## Authorization
+
+- scheduling concepts protected by authZ
+ - quotas
+ - priority classes
+ - &c
+- this authZ is not enforced by the scheduler, however; it is enforced by the kube-apiserver
+
+## Multi-tenancy Isolation
+
+- tenant: different users of workloads that don't want to trust one another
+- namespaces are usually the boundaries
+- affinity/anti-affinity for namespace
+- scheduler doesn't have data plane access
+- can have a noisy neighbor problem
+  - is that the scheduler's issue?
+  - not sure
+  - namespace agnostic
+  - can use priority classes which can be RBAC'd to a specific namespace, like kube-system
+  - does not handle tenant fairness, handles priority class fairness
+ - no visibility into network boundary or usage information
+ - no cgroup for network counts
+ - !FINDING anti-affinity can be abused: only I can have this one host, no one else, applicable from `kubectl`
+  - !NOTE no backoff process for the scheduler to reschedule a pod rejected by the kubelet; the replicaset controller can create a tight loop (RSC -> Scheduler -> Kubelet -> Reject -> RSC...)
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/kubelet.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/kubelet.md
new file mode 100644
index 000000000..ec972ded3
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/kubelet.md
@@ -0,0 +1,180 @@
+# Overview
+
+- Component: kubelet
+- Owner(s): [sig-node](https://github.com/kubernetes/community/tree/master/sig-node)
+- SIG/WG(s) at meeting:
+- Service Data Classification: High
+- Highest Risk Impact:
+
+# Service Notes
+
+This portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be spoken about; they are meant to drive what we discuss and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+- `kubelet` issues a watch request on the `kube-apiserver`
+- `kubelet` watches for pod allocations assigned to the node the kubelet is currently running on
+- when a new pod has been allocated for the kubelet's host, it retrieves the pod spec, and interacts with the Container Runtime via local Interprocess Communication to run the container
+- Kubelet also handles:
+ - answering log requests from the kube-apiserver
+ - monitoring pod health for failures
+ - working with the Container Runtime to deschedule pods when the pod has been deleted
+ - updating the kube-apiserver with host status (for use by the scheduler)
+
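+The watch described above is scoped to the kubelet's own node; a minimal sketch of reproducing that view with `kubectl` (the node name is illustrative):
+
+```sh
+# Stream the pods bound to a particular node, which is effectively the view a
+# kubelet maintains via its watch on the kube-apiserver.
+kubectl get pods --all-namespaces \
+  --field-selector spec.nodeName=node-1 \
+  --watch
+```
+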
+## Are there any subcomponents or shared boundaries?
+
+Yes.
+
+- Technically, kubelet runs on the same host as the Container Runtime and kubeproxy
+- There is a Trust Zone boundary between the Container Runtime and the kubelet
+
+## What communications protocols does it use?
+
+- HTTPS with certificate validation and some authentication mechanism for communication with the kube-apiserver as a client
+- HTTPS without certificate validation by default for its own serving endpoint (the kubelet's serving certificate is self-signed by default)
+
+## Where does it store data?
+
+- kubelet itself should not store much data
+- kubelet can be run in an "apiserver-less mode" that loads pod manifests from the file system
+- most data should be retrieved from the kube-apiserver via etcd
+- authentication credentials for the kube-apiserver may be stored on the file system or in memory (both in CLI parameter as well as actual program memory) for the duration of execution
+
+## What is the most sensitive data it stores?
+
+- authentication credentials are stored in memory or are out of scope
+
+## How is that data stored?
+
+N/A
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip a control family as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking controls; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (e.g., "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+- Port 10250: read/write, authenticated
+- Port 10255: read-only, unauthenticated (see the probe sketch after the findings below)
+  - cAdvisor uses this; it is slated for deprecation
+- Port 10248: healthz, unauthenticated
+- static pod manifest directory
+- Static pod fetch via HTTP(S)
+
+### Routes:
+
+- Auth filter on API, for 10250
+ - delegated to apiserver, subject access review, HTTPS request
+- `/pods` podspec on node -> leaks data
+- `/healthz`
+- `/spec`
+- `/stats-{cpu, mem, &c}`
+- on 10250 only:
+ - `/exec`
+ - `/attach`
+  - `/portforward`
+ - `/kube-auth`
+ - `/debug-flags`
+ - `/cri/{exec, attach, portforward}`
+
+### Findings:
+
+- !FINDING: 10255 is unauthenticated and leaks secrets
+- !FINDING: 10255/10248
+- !FINDING: 10250 is self-signed TLS
+
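+A minimal sketch of how trivially the first finding can be demonstrated: an unauthenticated HTTP GET against the read-only port returns the node's pod specs, including any environment variables (and therefore any secrets exposed through them). The node address is a placeholder, and the port is assumed to still be enabled.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Read-only kubelet port: no TLS, no authentication, no authorization.
+	resp, err := http.Get("http://node-1:10255/pods")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	// The response is a full PodList for the node.
+	fmt.Println(string(body))
+}
+```
+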
+## Cryptography
+
+- None
+
+## Secrets Management
+
+- returned from kube-apiserver unencrypted
+- in memory cache
+- if pod mounts disk, written to tmpfs
+- !FINDING (already captured) ENV vars can expose secrets (see the env-vs-volume sketch after this list)
+- configmaps are treated like secrets by kubelet
+- !FINDING keynames and secret names may be logged
+- maintains its own certs, secrets, bootstrap credential
+ - bootstrap: initial cert used to issue CSR to kube-apiserver
+ - !NOTE certs are written to disk unencrypted
+ - !FINDING bootstrap cert may be long lived, w/o a TTL
+
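+To make the env-var finding concrete, the sketch below builds two hypothetical containers using the `corev1` Go types: one consumes a secret through an environment variable, the other through a volume that the kubelet backs with tmpfs. Secret, key, and image names are placeholders.
+
+```go
+package main
+
+import corev1 "k8s.io/api/core/v1"
+
+// envSecretContainer consumes a secret via an environment variable; the value
+// is visible to anything that can read the container's environment (for
+// example /proc/<pid>/environ or tooling that dumps the pod spec).
+func envSecretContainer() corev1.Container {
+	return corev1.Container{
+		Name:  "app-env",
+		Image: "example/app:latest",
+		Env: []corev1.EnvVar{{
+			Name: "DB_PASSWORD",
+			ValueFrom: &corev1.EnvVarSource{
+				SecretKeyRef: &corev1.SecretKeySelector{
+					LocalObjectReference: corev1.LocalObjectReference{Name: "db-creds"},
+					Key:                  "password",
+				},
+			},
+		}},
+	}
+}
+
+// volumeSecretPod consumes the same secret as a file; the kubelet materializes
+// it on a tmpfs mount, so it does not land on the node's persistent disk.
+func volumeSecretPod() corev1.PodSpec {
+	return corev1.PodSpec{
+		Containers: []corev1.Container{{
+			Name:  "app-file",
+			Image: "example/app:latest",
+			VolumeMounts: []corev1.VolumeMount{{
+				Name:      "db-creds",
+				MountPath: "/var/run/secrets/db",
+				ReadOnly:  true,
+			}},
+		}},
+		Volumes: []corev1.Volume{{
+			Name: "db-creds",
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{SecretName: "db-creds"},
+			},
+		}},
+	}
+}
+
+func main() {
+	_ = envSecretContainer()
+	_ = volumeSecretPod()
+}
+```
+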
+## Authentication
+
+- delegated to kube-apiserver, via HTTPS request, with subject access review
+- two-way TLS by default (we believe)
+- token auth
+ - bearer token
+  - passed with the request to the kube-apiserver
+ - "token review"
+ - kube-apiserver responds w/ ident
+ - response is boolean (yes/no is this a user) and username/uid/groups/arbitrary data as a tuple
+- no auditing on the kubelet, but requests are logged on the kube-apiserver (see the TokenReview sketch below)
+
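+A rough sketch of the delegated authentication flow described above, using client-go to submit a TokenReview the way the kubelet's webhook authenticator does. The kubeconfig path and bearer token are placeholders; the real kubelet wires this up through its webhook authentication configuration rather than ad hoc calls like this.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	authnv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func main() {
+	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf") // placeholder
+	if err != nil {
+		panic(err)
+	}
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		panic(err)
+	}
+
+	// Ask the kube-apiserver whether this bearer token identifies a user.
+	review := &authnv1.TokenReview{
+		Spec: authnv1.TokenReviewSpec{Token: "caller-supplied-bearer-token"},
+	}
+	result, err := client.AuthenticationV1().TokenReviews().Create(
+		context.Background(), review, metav1.CreateOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	// The response is effectively the boolean plus identity tuple noted above.
+	fmt.Println("authenticated:", result.Status.Authenticated)
+	fmt.Println("user:", result.Status.User.Username, result.Status.User.UID, result.Status.User.Groups)
+}
+```
+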
+## Authorization
+
+- delegated to kube-apiserver via SubjectAccessReview (see the sketch below)
+
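+Similarly, a sketch of the delegated authorization check: asking the kube-apiserver whether a caller may perform an action by creating a SubjectAccessReview. The user, verb, resource, and node name are illustrative assumptions, not the kubelet's exact attributes.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	authzv1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func main() {
+	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubelet.conf") // placeholder
+	if err != nil {
+		panic(err)
+	}
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		panic(err)
+	}
+
+	// Roughly: "may user jane get the proxy subresource of node-1?"
+	sar := &authzv1.SubjectAccessReview{
+		Spec: authzv1.SubjectAccessReviewSpec{
+			User: "jane",
+			ResourceAttributes: &authzv1.ResourceAttributes{
+				Verb:        "get",
+				Resource:    "nodes",
+				Subresource: "proxy",
+				Name:        "node-1",
+			},
+		},
+	}
+	result, err := client.AuthorizationV1().SubjectAccessReviews().Create(
+		context.Background(), sar, metav1.CreateOptions{})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("allowed:", result.Status.Allowed, "reason:", result.Status.Reason)
+}
+```
+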
+## Multi-tenancy Isolation
+
+- kube-apiserver is the arbiter
+- kubelet doesn't know namespaces really
+- every pod is a separate tenant
+- pods are security boundaries
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/ancillary-data/rapid-risk-assessments/template.md b/wg-security-audit/ancillary-data/rapid-risk-assessments/template.md
new file mode 100644
index 000000000..b3808395c
--- /dev/null
+++ b/wg-security-audit/ancillary-data/rapid-risk-assessments/template.md
@@ -0,0 +1,95 @@
+# Overview
+
+- Component:
+- Owner(s):
+- SIG/WG(s) at meeting:
+- Service Data Classification:
+- Highest Risk Impact:
+
+# Service Notes
+
+This section should walk through the component and discuss its connections, their relevant controls, and generally lay out how the component serves its function. For example,
+a component that accepts an HTTP connection may have relevant questions about channel security (TLS and cryptography), authentication, authorization, non-repudiation/auditing,
+and logging. The questions aren't the *only* drivers of what may be discussed; they are meant to guide the conversation and keep things on task for the duration
+of a meeting/call.
+
+## How does the service work?
+
+## Are there any subcomponents or shared boundaries?
+
+## What communications protocols does it use?
+
+## Where does it store data?
+
+## What is the most sensitive data it stores?
+
+## How is that data stored?
+
+# Data Dictionary
+
+| Name | Classification/Sensitivity | Comments |
+| :--: | :--: | :--: |
+| Data | Goes | Here |
+
+# Control Families
+
+These are the areas of controls that we're interested in based on what the audit working group selected.
+
+When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI:
+
+> The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information.
+
+For example, a system may have authorization requirements that say:
+
+- users must be registered with a central authority
+- all requests must be verified to be owned by the requesting user
+- each account must have attributes associated with it to uniquely identify the user
+
+and so on.
+
+For this assessment, we're looking at six basic control families:
+
+- Networking
+- Cryptography
+- Secrets Management
+- Authentication
+- Authorization (Access Control)
+- Multi-tenancy Isolation
+
+Obviously we can skip a control family as "not applicable" in the event that the component does not require it. For example,
+something with the sole purpose of interacting with the local file system may have no meaningful Networking controls; this
+isn't a weakness, it's simply "not applicable."
+
+For each control family we want to ask:
+
+- What does the component do for this control?
+- What sorts of data passes through that control?
+ - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking
+- What can an attacker do with access to this component?
+- What's the simplest attack against it?
+- Are there mitigations that we recommend (e.g., "Always use an interstitial firewall")?
+- What happens if the component stops working (via DoS or other means)?
+- Have there been similar vulnerabilities in the past? What were the mitigations?
+
+# Threat Scenarios
+
+- An External Attacker without access to the client application
+- An External Attacker with valid access to the client application
+- An Internal Attacker with access to cluster
+- A Malicious Internal User
+
+## Networking
+
+## Cryptography
+
+## Secrets Management
+
+## Authentication
+
+## Authorization
+
+## Multi-tenancy Isolation
+
+## Summary
+
+# Recommendations
diff --git a/wg-security-audit/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf b/wg-security-audit/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf
new file mode 100644
index 000000000..65ab1e66d
Binary files /dev/null and b/wg-security-audit/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf differ
diff --git a/wg-security-audit/findings/Kubernetes Final Report.pdf b/wg-security-audit/findings/Kubernetes Final Report.pdf
new file mode 100644
index 000000000..7af3e1fd4
Binary files /dev/null and b/wg-security-audit/findings/Kubernetes Final Report.pdf differ
diff --git a/wg-security-audit/findings/Kubernetes Threat Model.pdf b/wg-security-audit/findings/Kubernetes Threat Model.pdf
new file mode 100644
index 000000000..9f7be3eaa
Binary files /dev/null and b/wg-security-audit/findings/Kubernetes Threat Model.pdf differ
diff --git a/wg-security-audit/findings/Kubernetes White Paper.pdf b/wg-security-audit/findings/Kubernetes White Paper.pdf
new file mode 100644
index 000000000..867bb48ef
Binary files /dev/null and b/wg-security-audit/findings/Kubernetes White Paper.pdf differ