diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/_index.md
index 9289d03cf..d74a41ca0 100644
--- a/content/rancher/v2.0-v2.4/en/backups/backup/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/backup/_index.md
@@ -7,6 +7,8 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/backups
   - /rancher/v2.0-v2.4/en/backups/legacy/backup
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/
 ---
 
 This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario.
diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md
index 6f4d55c4b..e20ea4208 100644
--- a/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md
@@ -9,6 +9,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/legacy/backup/single-node-backups/
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/docker-backups
   - /rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/
 ---
 
 
diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md
index b4394bad4..65fd599b0 100644
--- a/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md
@@ -9,6 +9,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/legacy/backups/k3s-backups
   - /rancher/v2.0-v2.4/en/backups/legacy/backup/k3s-backups
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/k3s-backups
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/
 ---
 
 When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data.
diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md
index f8929f07d..a85625de7 100644
--- a/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md
@@ -11,6 +11,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/legacy/backups/ha-backups
   - /rancher/v2.0-v2.4/en/backups/legacy/backup/ha-backups
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/
 ---
 
 This section describes how to create backups of your high-availability Rancher install.
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/_index.md
index c3a573258..bb6569b5e 100644
--- a/content/rancher/v2.0-v2.4/en/backups/restore/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/restore/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/restorations
   - /rancher/v2.0-v2.4/en/backups/legacy/restore
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/
 ---
 
 If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location.
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md
index c5c4d723f..1dd88ae4e 100644
--- a/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/
   - /rancher/v2.0-v2.4/en/backups/restorations/single-node-restoration
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/docker-restores
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/
 ---
 
 If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup.
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md
index fb315b5e2..2336cdcb3 100644
--- a/content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md
@@ -8,6 +8,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/legacy/restore/k8s-restore/k3s-restore/
   - /rancher/v2.0-v2.4/en/backups/legacy/restore/k3s-restore
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/k3s-restore
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/k3s-restore/
 ---
 
 When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data.
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md
index 631d43cae..c6de8e35c 100644
--- a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md
@@ -9,6 +9,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/backups/legacy/restore/k8s-restore/rke-restore/
   - /rancher/v2.0-v2.4/en/backups/legacy/restore/rke-restore
   - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/rke-restore
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/
 ---
 
 This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster.
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md
index 97e3968fd..b7026acc4 100644
--- a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md
+++ b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md
@@ -1,6 +1,8 @@
 ---
 title: "Rolling back to v2.0.0-v2.1.5"
 weight: 1
+aliases:
+  - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/
 ---
 
 > Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved here and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible.
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/_index.md
index d015a7189..6ea4f98b9 100644
--- a/content/rancher/v2.0-v2.4/en/best-practices/_index.md
+++ b/content/rancher/v2.0-v2.4/en/best-practices/_index.md
@@ -1,6 +1,8 @@
 ---
 title: Best Practices Guide
 weight: 4
+aliases:
+  - /rancher/v2.x/en/best-practices/v2.0-v2.4/
 ---
 
 The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers.
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md
index 6eb905e1e..e92222db8 100644
--- a/content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md
+++ b/content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md
@@ -4,6 +4,7 @@ weight: 100
 aliases:
   - /rancher/v2.0-v2.4/en/best-practices/containers
   - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/containers
+  - /rancher/v2.x/en/best-practices/v2.0-v2.4/containers/
 ---
 
 Running well-built containers can greatly impact the overall performance and security of your environment.
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md
index d795146df..6a11761bf 100644
--- a/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md
+++ b/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md
@@ -4,6 +4,7 @@ weight: 100
 aliases:
   - /rancher/v2.0-v2.4/en/best-practices/deployment-strategies
   - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-strategies
+  - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/
 ---
 
 There are two recommended deployment strategies. Each one has its own pros and cons. Read more about which one would fit best for your use case:
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md
index f31b4d7e2..34f1f0c17 100644
--- a/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md
+++ b/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md
@@ -4,6 +4,7 @@ weight: 100
 aliases:
   - /rancher/v2.0-v2.4/en/best-practices/deployment-types
   - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-types
+  - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/
 ---
 
 A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment.
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md
index eabc36c72..d85e5a22c 100644
--- a/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md
+++ b/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md
@@ -4,6 +4,7 @@ weight: 101
 aliases:
   - /rancher/v2.0-v2.4/en/best-practices/management
   - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/management
+  - /rancher/v2.x/en/best-practices/v2.0-v2.4/management/
 ---
 
 Rancher allows you to set up numerous combinations of configurations. Some configurations are more appropriate for development and testing, while there are other best practices for production environments for maximum availability and fault tolerance. The following best practices should be followed for production.
diff --git a/content/rancher/v2.0-v2.4/en/cli/_index.md b/content/rancher/v2.0-v2.4/en/cli/_index.md
index b78a30db6..2967263a9 100644
--- a/content/rancher/v2.0-v2.4/en/cli/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cli/_index.md
@@ -6,6 +6,7 @@ metaDescription: "The Rancher CLI is a unified tool that you can use to interact
 weight: 21
 aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cli
+  - /rancher/v2.x/en/cli/
 ---
 
 The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md
index 7024de3c0..b9c354f49 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md
@@ -2,8 +2,9 @@
 title: CIS Scans
 weight: 18
 aliases:
- - /rancher/v2.0-v2.4/en/cis-scans/legacy
- - /rancher/v2.0-v2.4/en/cis-scans
+  - /rancher/v2.0-v2.4/en/cis-scans/legacy
+  - /rancher/v2.0-v2.4/en/cis-scans
+  - /rancher/v2.x/en/cis-scans/v2.4/
 ---
 
 _Available as of v2.4.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md
index 9c2b6b04e..b1a58cb69 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cis-scans/legacy/skipped-tests
   - /rancher/v2.0-v2.4/en/cis-scans/v2.4/skipped-tests
   - /rancher/v2.0-v2.4/en/cis-scans/skipped-tests
+  - /rancher/v2.x/en/cis-scans/v2.4/skipped-tests/
 ---
 
 This section lists the tests that are skipped in the permissive test profile for RKE.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md
index ad34cd8cd..67430e68c 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/alerts
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/cluster-alerts
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/
 ---
 
 To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md
index 44bb93520..60af88703 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md
@@ -9,6 +9,8 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging
   - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging
   - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/
 ---
 
 Logging is helpful because it allows you to:
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md
index 744118995..293ea1849 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/elasticsearch
   - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/elasticsearch
   - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/elasticsearch
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/elasticsearch/
 ---
 
 If your organization uses [Elasticsearch](https://www.elastic.co/), either on premise or in the cloud, you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Elasticsearch deployment to view logs.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md
index c6b4660d2..200fb6c56 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md
@@ -2,9 +2,10 @@
 title: Fluentd
 weight: 600
 aliases:
- - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd
- - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/fluentd
- - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd
+  - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd
+  - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/fluentd
+  - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd/
 ---
 
 If your organization uses [Fluentd](https://www.fluentd.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Fluentd server to view logs.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md
index 6a94b79d5..ec15f61ea 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/kafka
   - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/kafka
   - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/kafka
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/kafka/
 ---
 
 If your organization uses [Kafka](https://kafka.apache.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Kafka server to view logs.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md
index 666fe7bcf..38fe73691 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md
@@ -7,6 +7,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk
   - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/splunk
   - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/
 ---
 
 If your organization uses [Splunk](https://www.splunk.com/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Splunk server to view logs.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md
index da112f8bd..9f959319c 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/syslog
   - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/syslog
   - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/syslog
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/syslog/
 ---
 
 If your organization uses [Syslog](https://tools.ietf.org/html/rfc5424), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Syslog server to view logs.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md
index 7d9c63813..d21612e27 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md
@@ -8,6 +8,8 @@ aliases:
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/cluster-monitoring
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/
 ---
 
 _Available as of v2.2.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md
index e1b785ca3..c6ea196ad 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/cluster-metrics
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/
 ---
 
 _Available as of v2.2.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md
index 44ce8ae12..f710ae39b 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/custom-metrics
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/custom-metrics/
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/
 ---
 
 After you've enabled [cluster level monitoring]({{< baseurl >}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/), You can view the metrics data from Rancher. You can also deploy the Prometheus custom metrics adapter then you can use the HPA with metrics stored in cluster monitoring.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md
index 5cb11c74e..9109666fa 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/expression
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/
 ---
 
 The PromQL expressions in this doc can be used to configure [alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md
index ffc9a4da9..01c490eb0 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/prometheus/
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/
 ---
 
 _Available as of v2.2.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md
index 5b8948611..f3748f37e 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/viewing-metrics
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/viewing-metrics
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/
 ---
 
 _Available as of v2.2.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md
index 0060dd5c8..4cbdfdd96 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md
@@ -7,6 +7,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/
   - /rancher/v2.0-v2.4/en/project-admin/istio
   - /rancher/v2.0-v2.4/en/istio/legacy/cluster-istio
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/
 ---
 
 _Available as of v2.3.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md
index 00e4a4acd..27c14e7ca 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio
   - /rancher/v2.0-v2.4/en/istio/legacy/disabling-istio
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/disabling-istio
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/disabling-istio/
 ---
 
 This section describes how to disable Istio in a cluster, namespace, or workload.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md
index d2c06a6c6..498dfa16f 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac
   - /rancher/v2.0-v2.4/en/istio/legacy/rbac
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/rbac
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/rbac/
 ---
 
 This section describes the permissions required to access Istio features and how to configure access to the Kiali and Jaeger visualizations.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md
index 937d6db32..2fd7ad70c 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md
@@ -4,6 +4,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes
   - /rancher/v2.0-v2.4/en/istio/legacy/release-notes
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/release-notes
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/release-notes/
 ---
 
 
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md
index da25577d8..8f4ea071e 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md
@@ -7,6 +7,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources
   - /rancher/v2.0-v2.4/en/istio/legacy/resources
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/resources
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/resources/
 ---
 
 _Available as of v2.3.0_
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md
index 7eaeb1ac1..3f44270f3 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup
   - /rancher/v2.0-v2.4/en/istio/legacy/setup
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/
 ---
 
 This section describes how to enable Istio and start using it in your projects.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md
index c01d4edb1..fa88cd285 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/deploy-workloads
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/
 ---
 
 > **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md
index 73c7842d9..c7a06c44a 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/
 ---
 
 This cluster uses the default Nginx controller to allow traffic into the cluster.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md
index e222a8568..d3a8130ac 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md
@@ -4,6 +4,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/
 ---
 
 >**Note:** The following guide is only for RKE provisioned clusters.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md
index 854921e59..97a725a79 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-namespace
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/
 ---
 
 You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md
index 78a2d83ca..7bd777e23 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/gateway
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/gateway
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/
 ---
 
 The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md
index fdc0982da..cae0c5936 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/node-selectors
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/node-selectors
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/
 ---
 
 > **Prerequisite:** Your cluster needs a worker node that can designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md
index 22fee4590..003ec7c71 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/set-up-traffic-management
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/
 ---
 
 A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md
index ed3e20002..566f6aef2 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic
   - /rancher/v2.0-v2.4/en/istio/legacy/setup/view-traffic
   - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/view-traffic
+  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/view-traffic/
 ---
 
 This section describes how to view the traffic that is being managed by Istio.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md
index 25dd8b254..1d4887810 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md
@@ -6,6 +6,8 @@ aliases:
   - /rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers
   - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers
   - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/
 ---
 
 Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action.
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md
index 9801050ad..f66a4fa79 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md
@@ -2,6 +2,8 @@
 title: vSphere Node Template Configuration in Rancher before v2.0.4
 shortTitle: Before v2.0.4
 weight: 5
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/
 ---
 
 - [Account access](#account-access)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md
index f53ea2087..658c575ea 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md
@@ -2,6 +2,8 @@
 title: vSphere Node Template Configuration in Rancher v2.0.4
 shortTitle: v2.0.4
 weight: 4
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/
 ---
 - [Account access](#account-access)
 - [Scheduling](#scheduling)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md
index 3d02d1c3f..feab925f5 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md
@@ -2,6 +2,8 @@
 title: vSphere Node Template Configuration in Rancher v2.2.0
 shortTitle: v2.2.0
 weight: 3
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/
 ---
 - [Account Access](#account-access)
 - [Scheduling](#scheduling)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md
index d2c31a9b0..829e7edea 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md
@@ -2,6 +2,8 @@
 title: vSphere Node Template Configuration in Rancher v2.3.0
 shortTitle: v2.3.0
 weight: 2
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/
 ---
 - [Account Access](#account-access)
 - [Scheduling](#scheduling)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md
index c6221e65d..9b4c53903 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md
@@ -2,6 +2,8 @@
 title: vSphere Node Template Configuration in Rancher v2.3.3
 shortTitle: v2.3.3
 weight: 1
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/
 ---
 - [Account Access](#account-access)
 - [Scheduling](#scheduling)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md
index 68a568e14..c1f5ce3ba 100644
--- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md
+++ b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md
@@ -1,6 +1,8 @@
 ---
 title: v2.1.x and v2.2.x Windows Documentation (Experimental)
 weight: 9100
+aliases:
+  - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/
 ---
 
 _Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_
diff --git a/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md b/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md
index e0aa7ff6a..db5cc1e85 100644
--- a/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md
+++ b/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md
@@ -1,6 +1,8 @@
 ---
 title: Questions about Upgrading to Rancher v2.x
 weight: 1
+aliases:
+  - /rancher/v2.x/en/faq/upgrades-to-2x/
 ---
 
 This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x.
diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md
index c86106ec7..7895c69d7 100644
--- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md
@@ -7,6 +7,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2
   - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha/helm2
   - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/helm2
+  - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/
 ---
 
 > Helm 3 has been released. If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2.
diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md
index 9bde3e6da..50b771e62 100644
--- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md
@@ -7,6 +7,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on
   - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/migrating-from-rke-add-on
   - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/migrating-from-rke-add-on
+  - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/
 ---
 
 > **Important: RKE add-on install is only supported up to Rancher v2.0.8**
diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md
index 1f1d212a2..773bb97f9 100644
--- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/upgrades/upgrades/namespace-migration
   - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/namespace-migration
   - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/namespace-migration
+  - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/
 ---
 
 >This section applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md
index be2d6ede3..40243da33 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md
@@ -6,6 +6,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/
   - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/
   - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2
+  - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/
 ---
 
 > After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md
index 061250246..5ba256893 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md
@@ -8,6 +8,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher
   - /rancher/v2.0-v2.4/en/installation/air-gap/install-rancher
   - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/install-rancher
+  - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/
 ---
 
 This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md
index e0d1d69cf..afac79574 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md
@@ -4,6 +4,7 @@ weight: 300
 aliases:
   - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-kube
   - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/launch-kubernetes
+  - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/
 ---
 
 This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md
index ae22120aa..75e024e21 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md
@@ -8,6 +8,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-for-private-reg/
   - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/
   - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/populate-private-registry
+  - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/
 ---
 
 > **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md
index 8d2539a4c..71c94aecb 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md
@@ -5,6 +5,7 @@ aliases:
   - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts
   - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/provision-host
   - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/prepare-nodes
+  - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/
 ---
 
 This section is about how to prepare your node(s) to install Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md
index 280c5bd83..c546bb51f 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md
@@ -3,6 +3,7 @@ title: Template for an RKE Cluster with a Certificate Signed by Recognized CA an
 weight: 3
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca
+  - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/
 ---
 
 RKE uses a cluster.yml file to install and configure your Kubernetes cluster.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md
index f81596826..9f7552a58 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md
@@ -3,6 +3,7 @@ title: Template for an RKE Cluster with a Self-signed Certificate and Layer 4 Lo
 weight: 2
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate
+  - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/
 ---
 
 RKE uses a cluster.yml file to install and configure your Kubernetes cluster.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md
index c260a3e5e..8b2e38ac1 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md
@@ -3,6 +3,7 @@ title: Template for an RKE Cluster with a Self-signed Certificate and SSL Termin
 weight: 3
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate
+  - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/
 ---
 
 RKE uses a cluster.yml file to install and configure your Kubernetes cluster.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md
index 04f5d0635..ee5d81eae 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md
@@ -3,6 +3,7 @@ title: Template for an RKE Cluster with a Recognized CA Certificate and SSL Term
 weight: 4
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca
+  - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/
 ---
 
 RKE uses a cluster.yml file to install and configure your Kubernetes cluster.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md
index 2310bb55f..552053f18 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md
@@ -3,6 +3,7 @@ title: Kubernetes Installation Using Helm 2
 weight: 1
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/
 ---
 
 > After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md
index 871799100..87cbb05f5 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md
@@ -3,6 +3,7 @@ title: "1. Create Nodes and Load Balancer"
 weight: 185
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/
 ---
 
 Use your provider of choice to provision 3 nodes and a Load Balancer endpoint for your RKE install.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md
index 39af4c877..b81f53034 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md
@@ -3,6 +3,7 @@ title: NGINX
 weight: 270
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nginx
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/
 ---
 
 NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md
index bb050e3dd..4f15be045 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md
@@ -3,6 +3,7 @@ title: Amazon NLB
 weight: 277
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nlb
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/
 ---
 
 ## Objectives
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md
index fcbb07246..f31863089 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md
@@ -4,6 +4,7 @@ description: "With Helm, you can create configurable deployments instead of usin
 weight: 195
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/
 ---
 
 Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md
index 2a3f8e230..789e01310 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md
@@ -3,6 +3,7 @@ title: Troubleshooting
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/troubleshooting
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/
 ---
 
 ### Helm commands show forbidden
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md
index 1574a0f78..985141b07 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md
@@ -3,6 +3,7 @@ title: "4. Install Rancher"
 weight: 200
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/
 ---
 
 Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md
index b8e3a5948..e13a88a1a 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md
@@ -3,6 +3,7 @@ title: Chart Options
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/
 ---
 
 ### Common Options
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md
index 0eb25cd3a..3f964fa14 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md
@@ -4,6 +4,7 @@ description: Read about how to populate the Kubernetes TLS secret for a Rancher
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/tls-secrets
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/
 ---
 
 Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md
index a9e4fedd8..df9f7d23e 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md
@@ -3,6 +3,7 @@ title: Troubleshooting
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/troubleshooting
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/
 ---
 
 ### Where is everything
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md
index f713be031..aeb3d54a9 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md
@@ -3,6 +3,7 @@ title: "2. Install Kubernetes with RKE"
 weight: 190
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/
 ---
 
 Use RKE to install Kubernetes with a high availability etcd configuration.
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md
index ef5f43302..81ec584c6 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md
@@ -3,6 +3,7 @@ title: Troubleshooting
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/troubleshooting
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/
 ---
 
 ### canal Pods show READY 2/3
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md
index 9f80b7463..a6989a9fe 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md
@@ -3,6 +3,7 @@ title: RKE Add-On Install
 weight: 276
 aliases:
   - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on
+  - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/
 ---
 
 > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8**
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md
index ac3db04bf..9425665cd 100644
--- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md
+++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md
@@ -4,6 +4,7 @@ weight: 300
 aliases:
   - /rke/latest/en/config-options/add-ons/api-auditing/
   - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing
+
- /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/ --- >**Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md index e0a651ba1..7a84ec936 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md @@ -3,6 +3,7 @@ title: Kubernetes Install with External Load Balancer (TCP/Layer 4) weight: 275 aliases: - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md index fd17fdf20..c8b155bb6 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md @@ -2,8 +2,9 @@ title: Amazon NLB Configuration weight: 277 aliases: -- /rancher/v2.0-v2.4/en/installation/ha-server-install/nlb/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb + - /rancher/v2.0-v2.4/en/installation/ha-server-install/nlb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md index 0ad8d2520..0dbb1290d 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md @@ -2,8 +2,9 @@ title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) weight: 276 aliases: -- /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md index 284311519..cda6cd4f1 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md @@ -2,8 +2,9 @@ title: Amazon ALB Configuration weight: 277 aliases: -- 
/rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/alb/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/alb + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/alb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/alb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md index df7af681f..c1e1c8024 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md @@ -2,8 +2,9 @@ title: NGINX Configuration weight: 277 aliases: -- /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/nginx/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/nginx/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md index 7f41f57db..80cf52b95 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md @@ -3,6 +3,7 @@ title: HTTP Proxy Configuration weight: 277 aliases: - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/proxy + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md index 2387e81fd..0c2697ec1 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md @@ -2,9 +2,10 @@ title: 404 - default backend weight: 30 aliases: -- /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/404-default-backend/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend -- /404-default-backend/ + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/404-default-backend/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend + - /404-default-backend/ + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git 
a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md index 35978bf4a..aa383d059 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md @@ -2,9 +2,10 @@ title: Troubleshooting HA RKE Add-On Install weight: 370 aliases: -- /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting -- /rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting + - /rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md index 0f2a369e0..9019f0b73 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md @@ -2,8 +2,9 @@ title: Generic troubleshooting weight: 5 aliases: -- /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/generic-troubleshooting/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/generic-troubleshooting/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md index 782424bb4..f6591e3cd 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md @@ -2,8 +2,9 @@ title: Failed to get job complete status weight: 20 aliases: -- /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/job-complete-status/ -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/job-complete-status/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/ --- > #### **Important: RKE 
add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md index 297d95a5e..cb9001f34 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md @@ -2,9 +2,10 @@ title: Kubernetes Install with External Load Balancer (TCP/Layer 4) weight: 275 aliases: -- /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-4-lb -- /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb -- /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-4-lb + - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-4-lb + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-4-lb + - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md index 653c23d9f..236813e01 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md @@ -2,9 +2,10 @@ title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) weight: 276 aliases: -- /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-7-lb -- /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/ -- /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb + - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-7-lb + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/ + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb + - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/ --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md index 0bd203970..aba42eb19 100644 --- a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md @@ -4,6 +4,7 @@ weight: 2040 aliases: - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions - /rancher/v2.0-v2.4/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/ --- Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. 
As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md index 32a3e6c0d..2c8197693 100644 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md +++ b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md @@ -3,6 +3,7 @@ title: Manual HPA Installation for Clusters Created Before Rancher v2.0.7 weight: 3050 aliases: - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-for-rancher-before-2_0_7 + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/ --- This section describes how to manually install HPAs for clusters created with Rancher before v2.0.7. This section also describes how to configure your HPA to scale up or down, and how to assign roles to your HPA. diff --git a/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md index 322a32e6e..e4c584e74 100644 --- a/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md +++ b/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.0-v2.4/en/project-admin/tools/pipelines/docs-for-v2.0.x - /rancher/v2.0-v2.4/en/project-admin/pipelines/docs-for-v2.0.x - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/docs-for-v2.0.x + - /rancher/v2.x/en/pipelines/docs-for-v2.0.x/ --- >**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/). diff --git a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md index badf94d9c..e3710de9d 100644 --- a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md +++ b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.0-v2.4/en/project-admin/tools/alerts - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/project-alerts - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/ --- To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. 
diff --git a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md index 6cb0f5cda..c4c54f55e 100644 --- a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md +++ b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md @@ -6,6 +6,8 @@ aliases: - /rancher/v2.0-v2.4/en/project-admin/tools/logging - /rancher/v2.0-v2.4/en/logging/legacy/project-logging - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/project-logging + - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/ + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/ --- Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md index 31ca2f58b..8e443a499 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.1 weight: 5 +aliases: + - /rancher/v2.x/en/security/rancher-2.1/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md index 7931a1d5c..15b400c43 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide v2.1 weight: 209 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.1 + - /rancher/v2.x/en/security/rancher-2.1/benchmark-2.1/ --- This document is a companion to the Rancher v2.1 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md index 1dcbee095..aedd034a5 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.1 weight: 104 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.1 + - /rancher/v2.x/en/security/rancher-2.1/hardening-2.1/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md index 457ecb447..a485c7073 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.2 weight: 4 +aliases: + - /rancher/v2.x/en/security/rancher-2.2/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md index 0bbfd78bd..7d719a872 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide v2.2 weight: 208 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.2 + - /rancher/v2.x/en/security/rancher-2.2/benchmark-2.2/ --- This document is a companion to the Rancher v2.2 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md index f3faefd78..768e53ecc 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.2 weight: 103 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.2 + - /rancher/v2.x/en/security/rancher-2.2/hardening-2.2/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md index 0f3f04da6..e50a8c2f1 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.3.x weight: 3 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/ --- The relevant Hardening Guide and Self Assessment guide depends on your Rancher version: diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md index aa31c9c9a..589714665 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.3.0 weight: 3 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md index 06b216c69..6c0fbc2b9 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide v2.3 weight: 207 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.3 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/ --- This document is a companion to the Rancher v2.3 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md index 8f5fe7cd3..7f77f1254 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.3 weight: 102 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.3 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.0-v2.3.2. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md index 77c1c408a..98c78426c 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.3.3 weight: 2 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md index f389fb30c..ae53043ca 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.3 weight: 206 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.3.3 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/ --- This document is a companion to the Rancher v2.3.3 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md index e0bbfa595..bf75ee6a0 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.3.3 weight: 101 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.3.3 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.3. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md index d6bbefc79..e6b4582af 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.3.5 weight: 1 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md index 4165562c1..3fbb7f27f 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 weight: 205 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.3.5 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/ --- ### CIS Kubernetes Benchmark v1.5 - Rancher v2.3.5 with Kubernetes v1.15 diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md index 1ff85295f..7065ae7b9 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.3.5 weight: 100 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.3.5 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md index 67cda4137..137759fe7 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md @@ -1,6 +1,8 @@ --- title: Rancher v2.4 weight: 2 +aliases: + - /rancher/v2.x/en/security/rancher-2.4/ --- ### Self Assessment Guide diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md index 991ff28b7..f1e0767ab 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md @@ -3,6 +3,7 @@ title: CIS Benchmark Rancher Self-Assessment Guide - v2.4 weight: 204 aliases: - /rancher/v2.0-v2.4/en/security/benchmark-2.4 + - /rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/ --- ### CIS Kubernetes Benchmark v1.5 - Rancher v2.4 with Kubernetes v1.15 diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md index 71e4af9ed..31f5017ac 100644 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md @@ -3,6 +3,7 @@ title: Hardening Guide v2.4 weight: 99 aliases: - /rancher/v2.0-v2.4/en/security/hardening-2.4 + - /rancher/v2.x/en/security/rancher-2.4/hardening-2.4/ --- This document provides prescriptive guidance for hardening a production installation of Rancher v2.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). diff --git a/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md b/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md index 44e74c02e..6cca088dc 100644 --- a/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md +++ b/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md @@ -1,6 +1,8 @@ --- title: Security Scans weight: 299 +aliases: + - /rancher/v2.x/en/security/security-scan/ --- The documentation about CIS security scans has moved [here.]({{}}/rancher/v2.0-v2.4/en/cis-scans) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md index f0375e50a..0d26be4f7 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md @@ -1,6 +1,8 @@ --- title: Migrating from v1.6 to v2.x weight: 28 +aliases: + - /rancher/v2.x/en/v1.6-migration/ --- Rancher v2.x has been rearchitected and rewritten with the goal of providing a complete management solution for Kubernetes and Docker. Due to these extensive changes, there is no direct upgrade path from v1.6 to v2.x, but rather a migration of your v1.6 services into v2.x as Kubernetes workloads. In v1.6, the most common orchestration used was Rancher's own engine called Cattle. The following guide explains and educates our Cattle users on running workloads in a Kubernetes environment. 
diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md index f674af8f1..426bf5040 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md @@ -1,6 +1,8 @@ --- title: "6. Service Discovery" weight: 600 +aliases: + - /rancher/v2.x/en/v1.6-migration/discover-services/ --- Service discovery is one of the core functionalities of any container-based environment. Once you have packaged and launched your application, the next step is making it discoverable to other containers in your environment or the external world. This document will describe how to use the service discovery support provided by Rancher v2.x so that you can find them by name. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md index 028f71b7e..f2bdd0890 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md @@ -1,6 +1,8 @@ --- title: "3. Expose Your Services" weight: 400 +aliases: + - /rancher/v2.x/en/v1.6-migration/expose-services/ --- In testing environments, you usually need to route external traffic to your cluster containers by using an unadvertised IP and port number, providing users access to their apps. You can accomplish this goal using port mapping, which exposes a workload (i.e., service) publicly over a specific port, provided you know your node IP address(es). You can either map a port using HostPorts (which exposes a service on a specified port on a single node) or NodePorts (which exposes a service on _all_ nodes on a single port). diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md index 132b591c7..6a0e7714a 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md @@ -1,6 +1,8 @@ --- title: "1. Get Started" weight: 25 +aliases: + - /rancher/v2.x/en/v1.6-migration/get-started/ --- Get started with your migration to Rancher v2.x by installing Rancher and configuring your new Rancher environment. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md index f913d51b9..bf88c6a2b 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md @@ -1,6 +1,8 @@ --- title: Kubernetes Introduction weight: 1 +aliases: + - /rancher/v2.x/en/v1.6-migration/kub-intro/ --- Rancher v2.x is built on the [Kubernetes](https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational) container orchestrator. This shift in underlying technology for v2.x is a large departure from v1.6, which supported several popular container orchestrators. Since Rancher is now based entirely on Kubernetes, it's helpful to learn the Kubernetes basics. 
diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md index 4b9c8482a..e740ca3d4 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md @@ -1,6 +1,8 @@ --- title: "7. Load Balancing" weight: 700 +aliases: + - /rancher/v2.x/en/v1.6-migration/load-balancing/ --- If your applications are public-facing and consume significant traffic, you should place a load balancer in front of your cluster so that users can always access their apps without service interruption. Typically, you can fulfill a high volume of service requests by [horizontally scaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) your deployment, which spins up additional application containers as traffic ramps up. However, this technique requires routing that distributes traffic across your nodes efficiently. In cases where you need to accommodate public traffic that scales up and down, you'll need a load balancer. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md index 6a7308d5d..da5d465c8 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md @@ -1,6 +1,8 @@ --- title: "4. Configure Health Checks" weight: 400 +aliases: + - /rancher/v2.x/en/v1.6-migration/monitor-apps/ --- Rancher v1.6 provided TCP and HTTP health checks on your nodes and services using its own health check microservice. These health checks monitored your containers to confirm they're operating as intended. If a container failed a health check, Rancher would destroy the unhealthy container and then replicates a healthy one to replace it. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md index 08922d054..c540b32b4 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md @@ -1,6 +1,8 @@ --- title: 2. Migrate Your Services weight: 100 +aliases: + - /rancher/v2.x/en/v1.6-migration/run-migration-tool/ --- Although your services from v1.6 won't work in Rancher v2.x by default, that doesn't mean you have to start again from square one, manually rebuilding your applications in v2.x. To help with migration from v1.6 to v2.x, Rancher has developed a migration tool. The migration-tools CLI is a utility that helps you recreate your applications in Rancher v2.x. This tool exports your Rancher v1.6 services as Compose files and converts them to a Kubernetes manifest that Rancher v2.x can consume. 
diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md index 56fc0a817..6f5a6f8df 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md @@ -1,6 +1,8 @@ --- title: Migration Tools CLI Reference weight: 100 +aliases: + - /rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/ --- The migration-tools CLI includes multiple commands and options to assist your migration from Rancher v1.6 to Rancher v2.x. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md index af3b3dc51..b993d1eec 100644 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md +++ b/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md @@ -1,6 +1,8 @@ --- title: "5. Schedule Your Services" weight: 500 +aliases: + - /rancher/v2.x/en/v1.6-migration/schedule-workloads/ --- In v1.6, objects called _services_ were used to schedule containers to your cluster hosts. Services included the Docker image for an application, along with configuration settings for a desired state. diff --git a/content/rancher/v2.5/en/_index.md b/content/rancher/v2.5/en/_index.md index 72de46537..513fb4d98 100644 --- a/content/rancher/v2.5/en/_index.md +++ b/content/rancher/v2.5/en/_index.md @@ -1,6 +1,6 @@ --- -title: "Rancher 2.5.7-2.5.9" -shortTitle: "Rancher 2.5.7-2.5.8+" +title: "Rancher 2.5" +shortTitle: "Rancher 2.5" description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." metaTitle: "Rancher 2.5.7-2.5.9 Docs: What is New?" metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." diff --git a/content/rancher/v2.5/en/admin-settings/_index.md b/content/rancher/v2.5/en/admin-settings/_index.md index be8eec446..0ff5e139b 100644 --- a/content/rancher/v2.5/en/admin-settings/_index.md +++ b/content/rancher/v2.5/en/admin-settings/_index.md @@ -7,6 +7,7 @@ aliases: - /rancher/v2.5/en/concepts/global-configuration/server-url/ - /rancher/v2.5/en/tasks/global-configuration/server-url/ - /rancher/v2.5/en/admin-settings/log-in/ + - /rancher/v2.x/en/admin-settings/ --- After installation, the [system administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. 
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/_index.md index 53c03cedc..e52fb47a0 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/_index.md @@ -2,8 +2,9 @@ title: Authentication weight: 1115 aliases: - - /rancher/v2.5/en/concepts/global-configuration/authentication/ - - /rancher/v2.5/en/tasks/global-configuration/authentication/ + - /rancher/v2.5/en/concepts/global-configuration/authentication/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/ + - /rancher/v2.x/en/admin-settings/authentication/ --- One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md index bffe3d786..0cce01d44 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md @@ -2,7 +2,8 @@ title: Configuring Active Directory (AD) weight: 1112 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/active-directory/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/active-directory/ + - /rancher/v2.x/en/admin-settings/authentication/ad/ --- If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md index 6330a68f9..33ea20aa3 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md @@ -2,7 +2,8 @@ title: Configuring Azure AD weight: 1115 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/azure-ad/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/azure-ad/ + - /rancher/v2.x/en/admin-settings/authentication/azure-ad/ --- If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. 
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md index 12c5404d5..791d3f5a6 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md @@ -2,7 +2,8 @@ title: Configuring FreeIPA weight: 1114 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/freeipa/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/freeipa/ + - /rancher/v2.x/en/admin-settings/authentication/freeipa/ --- If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md index 5344cfa97..4ea86b7b4 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md @@ -2,7 +2,8 @@ title: Configuring GitHub weight: 1116 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/github/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/github/ + - /rancher/v2.x/en/admin-settings/authentication/github/ --- In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md index 32c032439..9738dd3b6 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md @@ -1,5 +1,8 @@ --- title: Configuring Google OAuth +weight: 15 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/google/ --- If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md index b4cde2682..95a37c00e 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md @@ -2,6 +2,8 @@ title: Configuring Keycloak (SAML) description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins weight: 1200 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/keycloak/ --- If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. 
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/local/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/local/_index.md index 56142b143..2aabd06d3 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/local/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/local/_index.md @@ -2,7 +2,8 @@ title: Local Authentication weight: 1111 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/local-authentication/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/local-authentication/ + - /rancher/v2.x/en/admin-settings/authentication/local/ --- Local authentication is the default until you configure an external authentication provider. Local authentication is where Rancher stores the user information, i.e. names and passwords, of who can log in to Rancher. By default, the `admin` user that logs in to Rancher for the first time is a local user. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md index 7ed0a3738..3ea430255 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md @@ -1,6 +1,8 @@ --- title: Configuring Microsoft Active Directory Federation Service (SAML) weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/ --- If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md index d442e6476..30e687753 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md @@ -1,6 +1,8 @@ --- title: 1. Configuring Microsoft AD FS for Rancher weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/ --- Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md index 0f36d2631..0a7d12141 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md @@ -1,6 +1,8 @@ --- title: 2. Configuring Rancher for Microsoft AD FS weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/ --- After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. 
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md index b95792ce5..acb55aa39 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md @@ -1,6 +1,8 @@ --- title: Configuring Okta (SAML) weight: 1210 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/okta/ --- If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md index 91894d204..0d8c49afe 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md @@ -2,7 +2,8 @@ title: Configuring OpenLDAP weight: 1113 aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/openldap/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/openldap/ + - /rancher/v2.x/en/admin-settings/authentication/openldap/ --- If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md index 0162a9fe5..5a12e5f78 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md @@ -1,6 +1,8 @@ --- title: OpenLDAP Configuration Reference weight: 2 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/ --- This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md index 5d60f2f28..87abce2b8 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md @@ -1,6 +1,8 @@ --- title: Configuring PingIdentity (SAML) weight: 1200 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/ping-federate/ --- If your organization uses Ping Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. 
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md index fe00899aa..3d74de505 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md @@ -1,6 +1,8 @@ --- title: Configuring Shibboleth (SAML) weight: 1210 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/shibboleth/ --- If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md index fc8797e82..7d69442ec 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md @@ -1,6 +1,8 @@ --- title: Group Permissions with Shibboleth and OpenLDAP weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/shibboleth/about/ --- This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md index 148047cc7..8f708809a 100644 --- a/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md +++ b/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md @@ -1,6 +1,8 @@ --- title: Users and Groups weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/user-groups/ --- Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. diff --git a/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md b/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md index f78710512..378d84aea 100644 --- a/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md +++ b/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md @@ -2,6 +2,7 @@ title: Configuring a Global Default Private Registry weight: 400 aliases: + - /rancher/v2.x/en/admin-settings/config-private-registry/ --- You might want to use a private Docker registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the Docker images that are used in your clusters. 
diff --git a/content/rancher/v2.5/en/admin-settings/drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/_index.md index e86b45453..471124f63 100644 --- a/content/rancher/v2.5/en/admin-settings/drivers/_index.md +++ b/content/rancher/v2.5/en/admin-settings/drivers/_index.md @@ -1,6 +1,8 @@ --- title: Provisioning Drivers weight: 1140 +aliases: + - /rancher/v2.x/en/admin-settings/drivers/ --- Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. diff --git a/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md index 3cfba1c27..b5ea85f5f 100644 --- a/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md +++ b/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md @@ -1,6 +1,8 @@ --- title: Cluster Drivers -weight: 1 +weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/drivers/cluster-drivers/ --- Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. diff --git a/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md index 7161b4e14..52b2c1efd 100644 --- a/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md +++ b/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.5/en/concepts/global-configuration/node-drivers/ - /rancher/v2.5/en/tasks/global-configuration/node-drivers/ + - /rancher/v2.x/en/admin-settings/drivers/node-drivers/ --- Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. diff --git a/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md index c6c5737ea..f377c0e62 100644 --- a/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md +++ b/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md @@ -1,6 +1,8 @@ --- title: Upgrading Kubernetes without Upgrading Rancher weight: 1120 +aliases: + - /rancher/v2.x/en/admin-settings/k8s-metadata/ --- The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. 
This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. diff --git a/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md index 2c31ddb8c..16746c823 100644 --- a/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md +++ b/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/concepts/global-configuration/pod-security-policies/ - /rancher/v2.5/en/tasks/global-configuration/pod-security-policies/ - /rancher/v2.5/en/tasks/clusters/adding-a-pod-security-policy/ + - /rancher/v2.x/en/admin-settings/pod-security-policies/ --- _Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). diff --git a/content/rancher/v2.5/en/admin-settings/rbac/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/_index.md index 31b40d250..5fd866fd8 100644 --- a/content/rancher/v2.5/en/admin-settings/rbac/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rbac/_index.md @@ -2,7 +2,8 @@ title: Role-Based Access Control (RBAC) weight: 1120 aliases: - - /rancher/v2.5/en/concepts/global-configuration/users-permissions-roles/ + - /rancher/v2.5/en/concepts/global-configuration/users-permissions-roles/ + - /rancher/v2.x/en/admin-settings/rbac/ --- Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/), users can either be local or external. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md index 10e2d21fc..6c82e42e7 100644 --- a/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md @@ -1,6 +1,8 @@ --- title: Cluster and Project Roles weight: 1127 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/ --- Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md index 5f74bccd0..a2b2795e1 100644 --- a/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md @@ -3,6 +3,7 @@ title: Custom Roles weight: 1128 aliases: - /rancher/v2.5/en/tasks/global-configuration/roles/ + - /rancher/v2.x/en/admin-settings/rbac/default-custom-roles/ --- Within Rancher, _roles_ determine what actions a user can make within a cluster or project. 
diff --git a/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md index 101eb080f..eef72464c 100644 --- a/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md @@ -1,6 +1,8 @@ --- title: Global Permissions weight: 1126 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/global-permissions/ --- _Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md index 8dead2286..85e3f5866 100644 --- a/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md @@ -1,6 +1,8 @@ --- title: Locked Roles weight: 1129 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/locked-roles/ --- You can set roles to a status of `locked`. Locking roles prevents them from being assigned to users in the future. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md index db5cd9e8c..7459bcbf9 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md @@ -1,6 +1,8 @@ --- title: RKE Templates weight: 7010 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/ --- RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md index d51a8238a..5a1bd4802 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md @@ -1,6 +1,8 @@ --- title: Applying Templates weight: 50 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/applying-templates/ --- You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing) diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md index 533b4e1fd..d3966cab1 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md @@ -1,6 +1,8 @@ --- title: Creating and Revising Templates weight: 32 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/ --- This section describes how to manage RKE templates and revisions.
You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md index 3c5104a09..d5f3ea341 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md @@ -1,6 +1,8 @@ --- title: Template Creator Permissions weight: 10 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/ --- Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md index 4e6324110..99828c0ef 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md @@ -1,6 +1,8 @@ --- title: Template Enforcement weight: 32 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/enforcement/ --- This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md index 50a2ea8bd..5bb86bdda 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md @@ -1,6 +1,8 @@ --- title: Example Scenarios weight: 5 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/ --- These example scenarios describe how an organization could use templates to standardize cluster creation. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md index 3c85e86d6..ff9f76e0b 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md @@ -1,6 +1,8 @@ --- title: Example YAML weight: 60 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/example-yaml/ --- Below is an example RKE template configuration file for reference. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md index 76cf9ce04..333396989 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md @@ -1,6 +1,8 @@ --- title: Overriding Template Settings weight: 33 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/overrides/ --- When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting.
This switch marks those settings as **Allow User Override.** diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md index edc049ab6..2d1654729 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md @@ -1,6 +1,8 @@ --- title: RKE Templates and Infrastructure weight: 90 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/ --- In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md index 8a552a35a..ab995b0fa 100644 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md +++ b/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md @@ -1,6 +1,8 @@ --- title: Access and Sharing weight: 31 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/ --- If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. diff --git a/content/rancher/v2.5/en/api/_index.md b/content/rancher/v2.5/en/api/_index.md index 75b7acf2b..c6c3fdb63 100644 --- a/content/rancher/v2.5/en/api/_index.md +++ b/content/rancher/v2.5/en/api/_index.md @@ -1,6 +1,8 @@ --- title: API weight: 24 +aliases: + - /rancher/v2.x/en/api/ --- ## How to use the API diff --git a/content/rancher/v2.5/en/api/api-tokens/_index.md b/content/rancher/v2.5/en/api/api-tokens/_index.md index a0f19b8cd..1625c3010 100644 --- a/content/rancher/v2.5/en/api/api-tokens/_index.md +++ b/content/rancher/v2.5/en/api/api-tokens/_index.md @@ -3,6 +3,7 @@ title: API Tokens weight: 1 aliases: - /rancher/v2.5/en/cluster-admin/api/api-tokens/ + - /rancher/v2.x/en/api/api-tokens/ --- By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password. diff --git a/content/rancher/v2.5/en/backups/_index.md b/content/rancher/v2.5/en/backups/_index.md index fa00a496d..ad2114c9f 100644 --- a/content/rancher/v2.5/en/backups/_index.md +++ b/content/rancher/v2.5/en/backups/_index.md @@ -3,6 +3,7 @@ title: Backups and Disaster Recovery weight: 5 aliases: - /rancher/v2.5/en/backups/v2.5 + - /rancher/v2.x/en/backups/v2.5/ --- In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. diff --git a/content/rancher/v2.5/en/backups/back-up-rancher/_index.md b/content/rancher/v2.5/en/backups/back-up-rancher/_index.md index 4371a4f49..51cdf22df 100644 --- a/content/rancher/v2.5/en/backups/back-up-rancher/_index.md +++ b/content/rancher/v2.5/en/backups/back-up-rancher/_index.md @@ -3,6 +3,8 @@ title: Backing up Rancher weight: 1 aliases: - /rancher/v2.5/en/backups/v2.5/back-up-rancher + - /rancher/v2.x/en/backups/ + - /rancher/v2.x/en/backups/v2.5/back-up-rancher/ --- In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. 
To back up Rancher installed with Docker, refer to the instructions for [single node backups]({{}}/rancher/v2.5/en/backups/v2.5/docker-installs/docker-backups) diff --git a/content/rancher/v2.5/en/backups/configuration/_index.md b/content/rancher/v2.5/en/backups/configuration/_index.md index 52236ff1d..a7922993b 100644 --- a/content/rancher/v2.5/en/backups/configuration/_index.md +++ b/content/rancher/v2.5/en/backups/configuration/_index.md @@ -4,6 +4,7 @@ shortTitle: Configuration weight: 4 aliases: - /rancher/v2.5/en/backups/v2.5/configuration + - /rancher/v2.x/en/backups/v2.5/configuration/ --- - [Backup configuration](./backup-config) diff --git a/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md b/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md index 3588c3a14..5ec2433fc 100644 --- a/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md +++ b/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md @@ -4,6 +4,7 @@ shortTitle: Backup weight: 1 aliases: - /rancher/v2.5/en/backups/v2.5/configuration/backup-config + - /rancher/v2.x/en/backups/v2.5/configuration/backup-config/ --- The Backup Create page lets you configure a schedule, enable encryption and specify the storage location for your backups. diff --git a/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md b/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md index b6b62f901..aa837a810 100644 --- a/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md +++ b/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md @@ -4,6 +4,7 @@ shortTitle: Restore weight: 2 aliases: - /rancher/v2.5/en/backups/v2.5/configuration/restore-config + - /rancher/v2.x/en/backups/v2.5/configuration/restore-config/ --- The Restore Create page lets you provide details of the backup to restore from diff --git a/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md b/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md index 8acb97c56..5e8a6e2ce 100644 --- a/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md +++ b/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md @@ -4,6 +4,7 @@ shortTitle: Storage weight: 3 aliases: - /rancher/v2.5/en/backups/v2.5/configuration/storage-config + - /rancher/v2.x/en/backups/v2.5/configuration/storage-config/ --- Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible object store.
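The backup-config and storage-config pages touched above describe configuring a schedule, encryption, and an S3-compatible storage location. As a rough, non-authoritative sketch of how those options come together in the rancher-backup operator's Backup custom resource (field names are recalled from that operator and should be checked against the Examples page added later in this change; the bucket, region, and secret names are placeholders):

```yaml
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-backup                          # placeholder name
spec:
  resourceSetName: rancher-resource-set         # which Rancher resources to include
  schedule: "0 2 * * *"                         # recurring backup, cron syntax
  retentionCount: 10                            # how many backup files to keep
  encryptionConfigSecretName: encryptionconfig  # optional: encrypt the backup
  storageLocation:
    s3:                                         # S3-compatible object store
      credentialSecretName: s3-creds
      credentialSecretNamespace: default
      bucketName: rancher-backups
      folder: rancher
      region: us-west-2
      endpoint: s3.us-west-2.amazonaws.com
```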
diff --git a/content/rancher/v2.5/en/backups/docker-installs/_index.md b/content/rancher/v2.5/en/backups/docker-installs/_index.md index b6e88a8f8..2d6c07612 100644 --- a/content/rancher/v2.5/en/backups/docker-installs/_index.md +++ b/content/rancher/v2.5/en/backups/docker-installs/_index.md @@ -5,6 +5,7 @@ weight: 10 aliases: - /rancher/v2.5/en/installation/backups-and-restoration/single-node-backup-and-restoration/ - /rancher/v2.5/en/backups/v2.5/docker-installs + - /rancher/v2.x/en/backups/v2.5/docker-installs/ --- - [Backups](./docker-backups) diff --git a/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md b/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md index 507b7168b..8ceabd8db 100644 --- a/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md +++ b/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md @@ -8,6 +8,7 @@ aliases: - /rancher/v2.5/en/backups/backups/single-node-backups/ - /rancher/v2.5/en/backups/legacy/backup/single-node-backups/ - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-backups/ + - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/ --- diff --git a/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md b/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md index c3c88f5af..fd4ddac53 100644 --- a/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md +++ b/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ - /rancher/v2.5/en/backups/restorations/single-node-restoration - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-restores + - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/ --- If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. diff --git a/content/rancher/v2.5/en/backups/examples/_index.md b/content/rancher/v2.5/en/backups/examples/_index.md index fb00c44da..8b79893a8 100644 --- a/content/rancher/v2.5/en/backups/examples/_index.md +++ b/content/rancher/v2.5/en/backups/examples/_index.md @@ -3,6 +3,7 @@ title: Examples weight: 5 aliases: - /rancher/v2.5/en/backups/v2.5/examples + - /rancher/v2.x/en/backups/v2.5/examples/ --- This section contains examples of Backup and Restore custom resources. diff --git a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md b/content/rancher/v2.5/en/backups/migrating-rancher/_index.md index 49e3e58d5..79a79dac0 100644 --- a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md +++ b/content/rancher/v2.5/en/backups/migrating-rancher/_index.md @@ -1,6 +1,8 @@ --- title: Migrating Rancher to a New Cluster weight: 3 +aliases: + - /rancher/v2.x/en/backups/v2.5/migrating-rancher/ --- If you are migrating Rancher to a new Kubernetes cluster, you don't need to install Rancher on the new cluster first. If Rancher is restored to a new cluster with Rancher already installed, it can cause problems. 
diff --git a/content/rancher/v2.5/en/backups/restoring-rancher/_index.md b/content/rancher/v2.5/en/backups/restoring-rancher/_index.md index b5b849517..2791485ef 100644 --- a/content/rancher/v2.5/en/backups/restoring-rancher/_index.md +++ b/content/rancher/v2.5/en/backups/restoring-rancher/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.x/en/installation/backups/restores - /rancher/v2.x/en/backups/restoring-rancher + - /rancher/v2.x/en/backups/v2.5/restoring-rancher/ --- A restore is performed by creating a Restore custom resource. diff --git a/content/rancher/v2.5/en/best-practices/_index.md b/content/rancher/v2.5/en/best-practices/_index.md index 7be71efeb..77a1ec66e 100644 --- a/content/rancher/v2.5/en/best-practices/_index.md +++ b/content/rancher/v2.5/en/best-practices/_index.md @@ -3,6 +3,8 @@ title: Best Practices Guide weight: 4 aliases: - /rancher/v2.5/en/best-practices/v2.5 + - /rancher/v2.x/en/best-practices/ + - /rancher/v2.x/en/best-practices/v2.5/ --- The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md index 7bcc920d4..58c57134e 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md @@ -4,6 +4,7 @@ shortTitle: Rancher Managed Clusters weight: 2 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-managed + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/ --- ### Logging diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md index 3763c0b25..8e104b8a4 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md @@ -4,6 +4,7 @@ weight: 100 aliases: - /rancher/v2.5/en/best-practices/containers - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/containers + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/containers/ --- Running well-built containers can greatly impact the overall performance and security of your environment. diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md index 30a9a2a8f..1efc247af 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md @@ -3,6 +3,7 @@ title: Logging Best Practices weight: 1 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/logging + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/logging/ --- In this guide, we recommend best practices for cluster-level logging and application logging. 
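The restoring-rancher page above notes that a restore is performed by creating a Restore custom resource. A minimal sketch of that resource, under the same assumption that the rancher-backup operator's `resources.cattle.io/v1` API is in use (the backup filename and secret name are placeholders):

```yaml
apiVersion: resources.cattle.io/v1
kind: Restore
metadata:
  name: restore-rancher                             # placeholder name
spec:
  backupFilename: nightly-backup-2021-01-01.tar.gz  # placeholder: an existing backup file
  encryptionConfigSecretName: encryptionconfig      # only needed if the backup was encrypted
```

If the backup does not live in the operator's default storage location, a `storageLocation` block like the one in the Backup sketch above can be added to point at it.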
diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md index 12b794bac..0312fa70a 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md @@ -3,6 +3,7 @@ title: Best Practices for Rancher Managed vSphere Clusters shortTitle: Rancher Managed Clusters in vSphere aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/managed-vsphere + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/ --- This guide outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md index 62da3f815..a385c2ea4 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md @@ -3,6 +3,7 @@ title: Monitoring Best Practices weight: 2 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/monitoring + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/ --- Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. This is not different when using Kubernetes and Rancher. Fortunately the integrated monitoring and alerting functionality makes this whole process a lot easier. diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/_index.md index 2967207d2..32606b0db 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-server/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-server/_index.md @@ -4,6 +4,7 @@ shortTitle: Rancher Server weight: 1 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-server + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/ --- This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md index 15b895e94..c745f5dc1 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md @@ -3,6 +3,7 @@ title: Rancher Deployment Strategy weight: 100 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-strategies + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/ --- There are two recommended deployment strategies for a Rancher server that manages downstream Kubernetes clusters. Each one has its own pros and cons. 
Read more about which one would fit best for your use case: diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md index 10baf18b8..f32518a92 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md @@ -4,6 +4,7 @@ weight: 100 aliases: - /rancher/v2.5/en/best-practices/deployment-types - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-types + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/ --- This guide is geared toward use cases where Rancher is used to manage downstream Kubernetes clusters. The high-availability setup is intended to prevent losing access to downstream clusters if the Rancher server is not available. diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md index d4f9f6444..9b9877264 100644 --- a/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md +++ b/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md @@ -4,6 +4,7 @@ shortTitle: On-Premises Rancher in vSphere weight: 3 aliases: - /rancher/v2.5/en/best-practices/v2.5/rancher-server/rancher-in-vsphere + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/ --- This guide outlines a reference architecture for installing Rancher on an RKE Kubernetes cluster in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. diff --git a/content/rancher/v2.5/en/cis-scans/_index.md b/content/rancher/v2.5/en/cis-scans/_index.md index d8d1afce1..39cbc411e 100644 --- a/content/rancher/v2.5/en/cis-scans/_index.md +++ b/content/rancher/v2.5/en/cis-scans/_index.md @@ -3,6 +3,8 @@ title: CIS Scans weight: 17 aliases: - /rancher/v2.5/en/cis-scans/v2.5 + - /rancher/v2.x/en/cis-scans/ + - /rancher/v2.x/en/cis-scans/v2.5/ --- Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. diff --git a/content/rancher/v2.5/en/cis-scans/configuration/_index.md b/content/rancher/v2.5/en/cis-scans/configuration/_index.md index 38e4eaed6..e69ffd092 100644 --- a/content/rancher/v2.5/en/cis-scans/configuration/_index.md +++ b/content/rancher/v2.5/en/cis-scans/configuration/_index.md @@ -3,6 +3,7 @@ title: Configuration weight: 3 aliases: - /rancher/v2.5/en/cis-scans/v2.5/configuration + - /rancher/v2.x/en/cis-scans/v2.5/configuration/ --- This configuration reference is intended to help you manage the custom resources created by the `rancher-cis-benchmark` application. These resources are used for performing CIS scans on a cluster, skipping tests, setting the test profile that will be used during a scan, and other customization. 
diff --git a/content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md b/content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md index 5b4627ea4..993ba9568 100644 --- a/content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md +++ b/content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md @@ -3,6 +3,7 @@ title: Creating a Custom Benchmark Version for Running a Cluster Scan weight: 4 aliases: - /rancher/v2.5/en/cis-scans/v2.5/custom-benchmark + - /rancher/v2.x/en/cis-scans/v2.5/custom-benchmark/ --- _Available as of v2.5.4_ diff --git a/content/rancher/v2.5/en/cis-scans/rbac/_index.md b/content/rancher/v2.5/en/cis-scans/rbac/_index.md index 66cff4f39..1f389469c 100644 --- a/content/rancher/v2.5/en/cis-scans/rbac/_index.md +++ b/content/rancher/v2.5/en/cis-scans/rbac/_index.md @@ -5,6 +5,7 @@ weight: 3 aliases: - /rancher/v2.5/en/cis-scans/rbac - /rancher/v2.5/en/cis-scans/v2.5/rbac + - /rancher/v2.x/en/cis-scans/v2.5/rbac/ --- This section describes the permissions required to use the rancher-cis-benchmark App. diff --git a/content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md b/content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md index f997b814e..8f43f3946 100644 --- a/content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md +++ b/content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md @@ -4,6 +4,7 @@ weight: 3 aliases: - /rancher/v2.5/en/cis-scans/skipped-tests - /rancher/v2.5/en/cis-scans/v2.5/skipped-tests + - /rancher/v2.x/en/cis-scans/v2.5/skipped-tests/ --- This section lists the tests that are skipped in the permissive test profile for RKE. diff --git a/content/rancher/v2.5/en/cluster-admin/_index.md b/content/rancher/v2.5/en/cluster-admin/_index.md index 0dc0346a6..26a02781c 100644 --- a/content/rancher/v2.5/en/cluster-admin/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/_index.md @@ -1,6 +1,8 @@ --- title: Cluster Administration weight: 8 +aliases: + - /rancher/v2.x/en/cluster-admin/ --- After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. diff --git a/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md index 649417887..0f6be35af 100644 --- a/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md @@ -1,6 +1,8 @@ --- title: Backing up a Cluster weight: 2045 +aliases: + - /rancher/v2.x/en/cluster-admin/backing-up-etcd/ --- In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) can be easily performed. diff --git a/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md b/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md index 30ed0bde8..b3f52ad2d 100644 --- a/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md @@ -1,6 +1,8 @@ --- title: Certificate Rotation weight: 2040 +aliases: + - /rancher/v2.x/en/cluster-admin/certificate-rotation/ --- > **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. 
diff --git a/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md index 59f8087da..00f105535 100644 --- a/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md @@ -4,6 +4,7 @@ description: Learn about cluster cleanup when removing nodes from your Rancher-l weight: 2055 aliases: - /rancher/v2.5/en/faq/cleaning-cluster-nodes/ + - /rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/ --- This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. diff --git a/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md index ea1257aa1..78853f23b 100644 --- a/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md @@ -3,6 +3,7 @@ title: Cloning Clusters weight: 2035 aliases: - /rancher/v2.5/en/cluster-provisioning/cloning-clusters/ + - /rancher/v2.x/en/cluster-admin/cloning-clusters/ --- If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md index 8d882c391..45362ec1e 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md @@ -1,6 +1,8 @@ --- title: Cluster Access weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-access/ --- This section is about what tools can be used to access clusters managed by Rancher. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md index 784b69c57..d463b7c04 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md @@ -1,6 +1,8 @@ --- title: How the Authorized Cluster Endpoint Works weight: 2015 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-access/ace/ --- This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. 
It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md index 2de644655..293c2c28e 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/k8s-in-rancher/cluster-members/ - /rancher/v2.5/en/cluster-admin/cluster-members - /rancher/v2.5/en/cluster-provisioning/cluster-members/ + - /rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/ --- If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md index d4a2e08f1..5b78038e5 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md @@ -8,6 +8,7 @@ aliases: - /rancher/v2.5/en/concepts/clusters/kubeconfig-files/ - /rancher/v2.5/en/k8s-in-rancher/kubeconfig/ - /rancher/2.x/en/cluster-admin/kubeconfig + - /rancher/v2.x/en/cluster-admin/cluster-access/kubectl/ --- This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md index 4c5cc1dc6..77a6f8201 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md @@ -1,6 +1,8 @@ --- title: Cluster Autoscaler weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/ --- In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md index 86eb6e4e3..940ea5488 100644 --- a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md @@ -1,6 +1,8 @@ --- title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/ --- This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. 
diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md index 68996a69b..e98acf036 100644 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md @@ -3,6 +3,7 @@ title: Cluster Configuration weight: 2025 aliases: - /rancher/v2.5/en/k8s-in-rancher/editing-clusters + - /rancher/v2.x/en/cluster-admin/editing-clusters/ --- After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. diff --git a/content/rancher/v2.5/en/cluster-admin/nodes/_index.md b/content/rancher/v2.5/en/cluster-admin/nodes/_index.md index df97cabd5..514ebc67a 100644 --- a/content/rancher/v2.5/en/cluster-admin/nodes/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/nodes/_index.md @@ -1,6 +1,8 @@ --- title: Nodes and Node Pools weight: 2030 +aliases: + - /rancher/v2.x/en/cluster-admin/nodes/ --- After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.5/en/cluster-provisioning/) to provision the cluster, there are different node options available. diff --git a/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md index d902a8495..3614a7b6e 100644 --- a/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md @@ -1,6 +1,8 @@ --- title: Adding a Pod Security Policy weight: 80 +aliases: + - /rancher/v2.x/en/cluster-admin/pod-security-policy/ --- > **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) diff --git a/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md b/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md index cc6f053a2..86c6574c2 100644 --- a/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md @@ -6,7 +6,8 @@ aliases: - /rancher/v2.5/en/concepts/projects/ - /rancher/v2.5/en/tasks/projects/ - /rancher/v2.5/en/tasks/projects/create-project/ - - /rancher/v2.5/en/tasks/projects/create-project/ + - /rancher/v2.5/en/tasks/projects/create-project/ + - /rancher/v2.x/en/cluster-admin/projects-and-namespaces/ --- A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. diff --git a/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md index 03560c17b..9cc546c05 100644 --- a/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md @@ -1,6 +1,8 @@ --- title: Restoring a Cluster from Backup weight: 2050 +aliases: + - /rancher/v2.x/en/cluster-admin/restoring-etcd/ --- etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. 
The advantage of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. diff --git a/content/rancher/v2.5/en/cluster-admin/tools/_index.md b/content/rancher/v2.5/en/cluster-admin/tools/_index.md index 7438d3b94..39835e7e0 100644 --- a/content/rancher/v2.5/en/cluster-admin/tools/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/tools/_index.md @@ -3,6 +3,7 @@ title: Tools for Logging, Monitoring, and Visibility weight: 2033 aliases: - /rancher/v2.5/en/tools/notifiers-and-alerts/ + - /rancher/v2.x/en/cluster-admin/tools/ --- Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories: diff --git a/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md index 3c89e2a61..494396034 100644 --- a/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md @@ -1,6 +1,8 @@ --- title: Upgrading and Rolling Back Kubernetes weight: 70 +aliases: + - /rancher/v2.x/en/cluster-admin/upgrading-kubernetes/ --- Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md index ad6328fba..18273041b 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md @@ -5,6 +5,7 @@ weight: 2031 aliases: - /rancher/v2.5/en/tasks/clusters/adding-storage/ - /rancher/v2.5/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ --- When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md index 4d1ebf616..619c89bae 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md @@ -3,6 +3,7 @@ title: Setting up Existing Storage weight: 1 aliases: - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/ --- This section describes how to set up existing persistent storage for workloads in Rancher.
diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md index fbc7451b5..43d9327a6 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md @@ -1,6 +1,8 @@ --- title: Using an External Ceph Driver weight: 10 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/ --- These instructions are about using the external Ceph driver in an RKE2 cluster. If you are using RKE, additional steps are required. For details, refer to [this section.](#using-the-ceph-driver-with-rke) diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md index 339efbf12..5227586f1 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md @@ -4,6 +4,7 @@ weight: 3053 aliases: - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/ - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/examples/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ --- Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md index 22ecb6d72..3a33a7369 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md @@ -1,6 +1,8 @@ --- title: Creating Persistent Storage in Amazon's EBS weight: 3053 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/ --- This section describes how to set up Amazon's Elastic Block Store in EC2. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md index 608b5f569..395c2b516 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md @@ -3,6 +3,7 @@ title: NFS Storage weight: 3054 aliases: - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/ --- Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server.
diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md index e13c34e61..8893877c3 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md @@ -3,6 +3,7 @@ title: vSphere Storage weight: 3055 aliases: - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/ --- To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a persistent volume claim. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md index da7b3889d..81249a85c 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md @@ -1,6 +1,8 @@ --- title: GlusterFS Volumes weight: 5000 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/ --- > This section only applies to [RKE clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md index 64b199698..11279b1b8 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md @@ -3,6 +3,7 @@ title: How Persistent Storage Works weight: 1 aliases: - /rancher/v2.5/en/tasks/workloads/add-persistent-volume-claim + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/ --- A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md index 154ac03dd..1a30d5254 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md @@ -1,6 +1,8 @@ --- title: iSCSI Volumes weight: 6000 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/ --- In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. 
diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md index 54370f020..5edb8a7d7 100644 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md @@ -1,6 +1,8 @@ --- title: Dynamically Provisioning New Storage in Rancher weight: 2 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/ --- This section describes how to provision new persistent storage for workloads in Rancher. diff --git a/content/rancher/v2.5/en/cluster-provisioning/_index.md b/content/rancher/v2.5/en/cluster-provisioning/_index.md index 3be9c2d05..a46b43f5d 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/concepts/clusters/ - /rancher/v2.5/en/concepts/clusters/cluster-providers/ - /rancher/v2.5/en/tasks/clusters/ + - /rancher/v2.x/en/cluster-provisioning/ --- Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md index 2b196bcea..114b75e9c 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Setting up Clusters from Hosted Kubernetes Providers weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ --- In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md index fd4051e75..0bda69520 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md @@ -2,6 +2,8 @@ title: Creating an Aliyun ACK Cluster shortTitle: Alibaba Cloud Container Service for Kubernetes weight: 2120 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/ --- You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. 
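To make the PV/PVC and dynamic-provisioning pages above concrete: a workload requests storage by creating a PersistentVolumeClaim that names a StorageClass, and the matching volume is then provisioned on demand. The claim name, size, and class below are illustrative assumptions, not taken from the docs:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data               # placeholder claim name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: vsphere-thin   # e.g. a StorageClass like the one sketched earlier
  resources:
    requests:
      storage: 10Gi                # requested capacity
```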
diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md index 8e16c2b40..8ffd0f8bb 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md @@ -4,6 +4,7 @@ shortTitle: Azure Kubernetes Service weight: 2115 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-azure-container-service/ + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/ --- You can use Rancher to create a cluster hosted in Microsoft Azure Kubernetes Service (AKS). diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md index edaa4a47a..eace4d7ca 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md @@ -2,6 +2,8 @@ title: Creating a Huawei CCE Cluster shortTitle: Huawei Cloud Kubernetes Service weight: 2130 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/ --- You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md index 441d7ed78..238530f2c 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md @@ -4,6 +4,7 @@ shortTitle: Amazon EKS weight: 2110 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/ --- Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). 
diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md index ebdf64329..012bf202f 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md @@ -5,6 +5,7 @@ weight: 2105 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-gke/ - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/ --- {{% tabs %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md index 2420cb57d..6fdb9597d 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md @@ -2,6 +2,8 @@ title: Creating a Tencent TKE Cluster shortTitle: Tencent Kubernetes Engine weight: 2125 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/ --- You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. diff --git a/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md index 6798b1b53..54e83ca88 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md @@ -1,6 +1,8 @@ --- title: Node Requirements for Rancher Managed Clusters weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/node-requirements/ --- This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/_index.md index 040e59b55..c765e2ed3 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/production/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/production/_index.md @@ -1,6 +1,8 @@ --- title: Checklist for Production-Ready Clusters weight: 2 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/ --- In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. 
diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md index e4d6979de..108741dec 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md @@ -1,6 +1,8 @@ --- title: Roles for Nodes in Kubernetes weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/ --- This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md index 6824da88b..c874c1596 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md @@ -1,6 +1,8 @@ --- title: Recommended Cluster Architecture weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/ --- There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. diff --git a/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md index 34fd7d6a1..c985b76cb 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md @@ -3,6 +3,8 @@ title: Registering Existing Clusters weight: 6 aliases: - /rancher/v2.5/en/cluster-provisioning/imported-clusters + - /rancher/v2.x/en/cluster-provisioning/imported-clusters/ + - /rancher/v2.x/en/cluster-provisioning/registered-clusters/ --- The cluster registration feature replaced the feature to import clusters. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md index de4694235..d1aa11905 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Launching Kubernetes with Rancher weight: 4 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/ --- You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. 
It can launch Kubernetes on any computers, including: diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md index 79e705f92..82f660865 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md @@ -4,6 +4,7 @@ weight: 2300 aliases: - /rancher/v2.5/en/concepts/clusters/cloud-providers/ - /rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/ --- A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md index 3549afe1d..d473d1e72 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md @@ -1,6 +1,8 @@ --- title: Setting up the Amazon Cloud Provider weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/ --- When using the `Amazon` cloud provider, you can leverage the following capabilities: diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md index 258845725..0e9498bc5 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md @@ -1,6 +1,8 @@ --- title: Setting up the Azure Cloud Provider weight: 2 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/ --- When using the `Azure` cloud provider, you can leverage the following capabilities: diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md index 000b537c1..f06482536 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md @@ -1,6 +1,8 @@ --- title: Setting up the Google Compute Engine Cloud Provider weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce/ --- In this section, you'll learn how to enable the Google Compute Engine (GCE) cloud provider for custom clusters in Rancher. A custom cluster is one in which Rancher installs Kubernetes on existing nodes. 
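The node-role and recommended-architecture pages earlier (`etcd`, `controlplane`, `worker`) and the cloud provider pages above both surface in the RKE cluster configuration. The fragment below is only a loose sketch: the addresses, SSH user, and the choice of the Amazon provider are placeholders, and enabling `aws` also assumes the nodes carry the IAM instance profile and cluster tags those pages describe.

```yaml
# Sketch of an RKE cluster.yml fragment (placeholder values throughout)
nodes:
  - address: 203.0.113.10          # placeholder IP (TEST-NET range)
    user: ubuntu                   # placeholder SSH user
    role: [controlplane, etcd]     # control plane and etcd kept off the workers
  - address: 203.0.113.11
    user: ubuntu
    role: [controlplane, etcd]
  - address: 203.0.113.12
    user: ubuntu
    role: [controlplane, etcd]
  - address: 203.0.113.20
    user: ubuntu
    role: [worker]                 # workloads run here
cloud_provider:
  name: aws                        # enables the in-tree Amazon cloud provider
```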
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md index 5bc41b340..4e63d522e 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md @@ -1,6 +1,8 @@ --- title: Setting up the vSphere Cloud Provider weight: 4 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/ --- In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md index d4a19ef8b..6902e487f 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md @@ -2,6 +2,8 @@ title: How to Configure In-tree vSphere Cloud Provider shortTitle: In-tree Cloud Provider weight: 10 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/ --- To set up the in-tree vSphere cloud provider, follow these steps while creating the vSphere cluster in Rancher: diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md index a390d05b5..5dc20d04b 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md @@ -2,6 +2,8 @@ title: How to Configure Out-of-tree vSphere Cloud Provider shortTitle: Out-of-tree Cloud Provider weight: 10 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/ --- _Available as of v2.5+_ diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md index 0582fb22b..fb84e8c92 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md @@ -1,6 +1,8 @@ --- title: Migrating vSphere In-tree Volumes to CSI weight: 5 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/ --- _Available as of v2.5+_ diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md index 85ddf70f2..2a704050c 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md @@ -6,6 +6,7 @@ weight: 2225 aliases: - 
/rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ - /rancher/v2.5/en/cluster-provisioning/custom-clusters/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ --- When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster on on-prem bare-metal servers, on-prem virtual machines, or on any node hosted by an infrastructure provider. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md index 2a100a7f6..0e12cb24e 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md @@ -4,6 +4,7 @@ weight: 2500 aliases: - /rancher/v2.5/en/admin-settings/agent-options/ - /rancher/v2.5/en/cluster-provisioning/custom-clusters/agent-options + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/ --- Rancher deploys an agent on each node to communicate with the node. This page describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md index 26bfc91b3..b1ce37d00 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md @@ -3,6 +3,7 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider weight: 2205 aliases: - /rancher/v2.5/en/concepts/global-configuration/node-templates/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ --- Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md index 85a89c6a2..7e4e12792 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md @@ -4,6 +4,7 @@ shortTitle: Azure weight: 2220 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/ --- In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher.
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md index e7b00c936..f755dc609 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md @@ -1,6 +1,8 @@ --- title: Azure Node Template Configuration weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/ --- For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md index c891457c8..6231b1739 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md @@ -4,6 +4,7 @@ shortTitle: DigitalOcean weight: 2215 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/ --- In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md index 84171f272..5ce39d732 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md @@ -1,6 +1,8 @@ --- title: DigitalOcean Node Template Configuration weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/ --- Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 6eb930530..cfbd8ad23 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -3,6 +3,8 @@ title: Creating an Amazon EC2 Cluster shortTitle: Amazon EC2 description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher weight: 2210 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ --- In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2.
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md index ca30c9abf..e4a50144a 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md @@ -1,6 +1,8 @@ --- title: EC2 Node Template Configuration weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ --- For more details about EC2 nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md index f9672b462..0fc3708e0 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md @@ -6,6 +6,7 @@ metaDescription: Use Rancher to create a vSphere cluster. It may consist of grou weight: 2225 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/ --- By using Rancher with vSphere, you can bring cloud operations on-premises. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md index ca4598683..cf2082a1d 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md @@ -3,6 +3,7 @@ title: Creating Credentials in the vSphere Console weight: 3 aliases: - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/ --- This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md index 2d960b8a8..1a9e64e20 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Provisioning Kubernetes Clusters in vSphere weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/ --- In this section, you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere.
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md index 5676424c5..9271841fa 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference - /rancher/v2.5/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/ --- The following node template configuration reference applies to Rancher v2.3.3+. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md index df6208803..662af99a3 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md @@ -1,6 +1,8 @@ --- title: RKE Cluster Configuration Reference weight: 2250 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/ --- When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) as the Kubernetes distribution. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md index 98a232126..d06de7da0 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md @@ -1,6 +1,8 @@ --- title: Assigning Pod Security Policies weight: 2260 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/ --- _Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). 
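The pod security policy page above describes PSPs as objects that restrict security-sensitive pod settings such as root privileges. Purely as an illustration of the resource type (the name and rules below are placeholder choices, not a policy from the docs), a restrictive PodSecurityPolicy might look like:

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example         # placeholder name
spec:
  privileged: false                # no privileged containers
  allowPrivilegeEscalation: false
  runAsUser:
    rule: MustRunAsNonRoot         # reject containers that run as root
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:                         # only these volume types are allowed
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
```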
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md index 60e609230..39b657101 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md @@ -1,6 +1,8 @@ --- title: Rancher Agents weight: 2400 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/ --- There are two different agent resources deployed on Rancher managed clusters: diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md index 0a024cceb..37ff393ed 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Launching Kubernetes on Windows Clusters weight: 2240 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/ --- When provisioning a [custom cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md index 10f8743b8..c67aaa96c 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md @@ -1,6 +1,8 @@ --- title: Configuration for Storage Classes in Azure weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/ --- If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md index ee075c394..3be832aae 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md @@ -1,6 +1,8 @@ --- title: Networking Requirements for Host Gateway (L2bridge) weight: 1000 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/ --- This section describes how to configure custom Windows clusters that are using *Host Gateway (L2bridge)* mode. 
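The Azure storage class page in the hunks above points at Azure Files for clusters running on Azure VMs. As a rough sketch only (the class name and SKU are assumptions, not values from the docs), an in-tree Azure Files StorageClass could be declared like this:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: azurefile                       # placeholder name
provisioner: kubernetes.io/azure-file   # in-tree Azure Files provisioner
parameters:
  skuName: Standard_LRS                 # storage account SKU; adjust as needed
```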
diff --git a/content/rancher/v2.5/en/contributing/_index.md b/content/rancher/v2.5/en/contributing/_index.md index 4d81c27ac..3f27aced5 100644 --- a/content/rancher/v2.5/en/contributing/_index.md +++ b/content/rancher/v2.5/en/contributing/_index.md @@ -3,6 +3,7 @@ title: Contributing to Rancher weight: 27 aliases: - /rancher/v2.5/en/faq/contributing/ + - /rancher/v2.x/en/contributing/ --- This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. diff --git a/content/rancher/v2.5/en/deploy-across-clusters/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/_index.md index bb41e0e6e..fe47c8326 100644 --- a/content/rancher/v2.5/en/deploy-across-clusters/_index.md +++ b/content/rancher/v2.5/en/deploy-across-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Deploying Applications across Clusters weight: 12 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/ --- ### Fleet diff --git a/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md index bee613fe3..24d1a59a0 100644 --- a/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md +++ b/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md @@ -1,6 +1,8 @@ --- title: Fleet - GitOps at Scale weight: 1 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/fleet/ --- _Available as of Rancher v2.5_ diff --git a/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md index d1acf0324..862daaaca 100644 --- a/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md +++ b/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md @@ -1,6 +1,8 @@ --- title: Multi-cluster Apps weight: 2 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/ --- > As of Rancher v2.5, we now recommend using [Fleet]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet) for deploying apps across clusters. diff --git a/content/rancher/v2.5/en/faq/_index.md b/content/rancher/v2.5/en/faq/_index.md index 339e94a09..9e0ab2b64 100644 --- a/content/rancher/v2.5/en/faq/_index.md +++ b/content/rancher/v2.5/en/faq/_index.md @@ -3,6 +3,7 @@ title: FAQ weight: 25 aliases: - /rancher/v2.5/en/about/ + - /rancher/v2.x/en/faq/ --- This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. diff --git a/content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md b/content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md index a30b42122..e13380c31 100644 --- a/content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md +++ b/content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md @@ -1,6 +1,8 @@ --- title: Deprecated Features in Rancher v2.5 weight: 100 +aliases: + - /rancher/v2.x/en/faq/deprecated-features-25x/ --- ### What is Rancher's Deprecation policy? diff --git a/content/rancher/v2.5/en/faq/kubectl/_index.md b/content/rancher/v2.5/en/faq/kubectl/_index.md index b4172ab0a..3414564db 100644 --- a/content/rancher/v2.5/en/faq/kubectl/_index.md +++ b/content/rancher/v2.5/en/faq/kubectl/_index.md @@ -1,6 +1,8 @@ --- title: Installing and Configuring kubectl weight: 100 +aliases: + - /rancher/v2.x/en/faq/kubectl/ --- `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. 
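The kubectl page above covers installing and configuring `kubectl`, which reads its cluster access settings from a kubeconfig file. The shape of such a file is sketched below; every value is a placeholder, and the Rancher-style server URL and token format are assumptions for illustration, so use the kubeconfig you actually download from Rancher.

```yaml
apiVersion: v1
kind: Config
clusters:
  - name: my-cluster
    cluster:
      server: https://rancher.example.com/k8s/clusters/c-xxxxx   # placeholder endpoint
users:
  - name: my-user
    user:
      token: kubeconfig-user-abc123                              # placeholder bearer token
contexts:
  - name: my-cluster
    context:
      cluster: my-cluster
      user: my-user
current-context: my-cluster
```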
diff --git a/content/rancher/v2.5/en/faq/networking/_index.md b/content/rancher/v2.5/en/faq/networking/_index.md index 1b3488e9f..092cb4d1b 100644 --- a/content/rancher/v2.5/en/faq/networking/_index.md +++ b/content/rancher/v2.5/en/faq/networking/_index.md @@ -1,6 +1,8 @@ --- title: Networking weight: 8005 +aliases: + - /rancher/v2.x/en/faq/networking/ --- Networking FAQ's diff --git a/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md index 6fc64f0e1..6e19619b2 100644 --- a/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md +++ b/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md @@ -2,6 +2,8 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you weight: 2300 +aliases: + - /rancher/v2.x/en/faq/networking/cni-providers/ --- ## What is CNI? diff --git a/content/rancher/v2.5/en/faq/removing-rancher/_index.md b/content/rancher/v2.5/en/faq/removing-rancher/_index.md index be8e6175d..49c1acde9 100644 --- a/content/rancher/v2.5/en/faq/removing-rancher/_index.md +++ b/content/rancher/v2.5/en/faq/removing-rancher/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/removing-rancher/ - /rancher/v2.5/en/admin-settings/removing-rancher/ - /rancher/v2.5/en/admin-settings/removing-rancher/rancher-cluster-nodes/ + - /rancher/v2.x/en/faq/removing-rancher/ --- This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. diff --git a/content/rancher/v2.5/en/faq/security/_index.md b/content/rancher/v2.5/en/faq/security/_index.md index 29e0cc3fb..c0162f779 100644 --- a/content/rancher/v2.5/en/faq/security/_index.md +++ b/content/rancher/v2.5/en/faq/security/_index.md @@ -1,7 +1,8 @@ --- title: Security weight: 8007 - +aliases: + - /rancher/v2.x/en/faq/security/ --- **Is there a Hardening Guide?** diff --git a/content/rancher/v2.5/en/faq/technical/_index.md b/content/rancher/v2.5/en/faq/technical/_index.md index 6abef1933..4aaecf857 100644 --- a/content/rancher/v2.5/en/faq/technical/_index.md +++ b/content/rancher/v2.5/en/faq/technical/_index.md @@ -1,6 +1,8 @@ --- title: Technical weight: 8006 +aliases: + - /rancher/v2.x/en/faq/technical/ --- ### How can I reset the administrator password? diff --git a/content/rancher/v2.5/en/faq/telemetry/_index.md b/content/rancher/v2.5/en/faq/telemetry/_index.md index 6ab582667..2aaccbdd5 100644 --- a/content/rancher/v2.5/en/faq/telemetry/_index.md +++ b/content/rancher/v2.5/en/faq/telemetry/_index.md @@ -1,6 +1,8 @@ --- title: Telemetry weight: 8008 +aliases: + - /rancher/v2.x/en/faq/telemetry/ --- ### What is Telemetry? 
diff --git a/content/rancher/v2.5/en/helm-charts/_index.md b/content/rancher/v2.5/en/helm-charts/_index.md index d20b40a22..e5e6ba785 100644 --- a/content/rancher/v2.5/en/helm-charts/_index.md +++ b/content/rancher/v2.5/en/helm-charts/_index.md @@ -6,6 +6,18 @@ aliases: - /rancher/v2.5/en/catalog/ - /rancher/v2.5/en/catalog/apps - /rancher/v2.5/en/catalog/launching-apps + - /rancher/v2.x/en/helm-charts/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/ + - /rancher/v2.x/en/helm-charts/apps-marketplace/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/ --- In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. diff --git a/content/rancher/v2.5/en/installation/_index.md b/content/rancher/v2.5/en/installation/_index.md index c722beafa..409e36d4d 100644 --- a/content/rancher/v2.5/en/installation/_index.md +++ b/content/rancher/v2.5/en/installation/_index.md @@ -4,6 +4,7 @@ description: Learn how to install Rancher in development and production environm weight: 3 aliases: - /rancher/v2.5/en/installation/how-ha-works/ + - /rancher/v2.x/en/installation/ --- This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md index d3d3f3828..aeb9a5844 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md @@ -8,6 +8,7 @@ aliases: - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke - /rancher/v2.5/en/installation/ha-server-install - /rancher/v2.5/en/installation/install-rancher-on-k8s/install + - /rancher/v2.x/en/installation/install-rancher-on-k8s/ --- In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md index 53bb9ecfc..0e9b59f71 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md @@ -2,6 +2,8 @@ title: Installing Rancher on Amazon EKS shortTitle: Amazon EKS weight: 4 +aliases: + - /rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/ --- This page covers two ways to install Rancher on EKS. 
diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md index 68bdf90f0..52ec71d62 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/options/chart-options/ - /rancher/v2.5/en/installation/options/helm2/helm-rancher/chart-options/ - /rancher/v2.5/en/installation/resources/chart-options + - /rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/ --- This page is a configuration reference for the Rancher Helm chart. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md index 7fd6ec3e7..8f7a32d5b 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md @@ -8,6 +8,7 @@ aliases: - /rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks + - /rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/ --- - [Rolling Back to Rancher v2.5.0+](#rolling-back-to-rancher-v2-5-0) diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md index f3ac8f7e5..49ce4d505 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md @@ -15,6 +15,7 @@ aliases: - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha - /rancher/v2.5/en/installation/upgrades-rollbacks/ - /rancher/v2.5/en/upgrades/ + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/ --- The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. 
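The chart options page above is the configuration reference for the Rancher Helm chart. As a hedged example of how a few commonly documented options fit together in a values file (the hostname and email are placeholders; treat that reference as authoritative for the available keys):

```yaml
hostname: rancher.example.com      # placeholder FQDN the Rancher server will answer on
replicas: 3                        # number of Rancher pods
ingress:
  tls:
    source: letsEncrypt            # or "rancher" / "secret"
letsEncrypt:
  email: admin@example.com         # placeholder contact for certificate issuance
```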
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/_index.md index 20b34efef..9fc0270d9 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/_index.md @@ -1,6 +1,8 @@ --- title: Other Installation Methods weight: 3 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/ --- ### Air Gapped Installations diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md index 57d7dc158..fad196708 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/installation/air-gap-installation/ - /rancher/v2.5/en/installation/air-gap-high-availability/ - /rancher/v2.5/en/installation/air-gap-single-node/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/ --- This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md index d675a8f89..d97e22668 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md @@ -8,6 +8,7 @@ aliases: - /rancher/v2.5/en/installation/air-gap/install-rancher - /rancher/v2.5/en/installation/air-gap-installation/install-rancher/ - /rancher/v2.5/en/installation/air-gap-high-availability/install-rancher/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/ --- This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md index d30cfd006..53405d376 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md @@ -3,6 +3,7 @@ title: '3. Install Kubernetes (Skip for Docker Installs)' weight: 300 aliases: - /rancher/v2.5/en/installation/air-gap-high-availability/install-kube + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/ --- > Skip this section if you are installing Rancher on a single node with Docker. 
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md index ec3ff56d2..d42ee6f04 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md @@ -7,6 +7,7 @@ aliases: - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - /rancher/v2.5/en/installation/air-gap-installation/prepare-private-reg/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/ --- This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md index 0d21d8ca3..4879223c5 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md @@ -3,6 +3,7 @@ title: '1. Set up Infrastructure and Private Registry' weight: 100 aliases: - /rancher/v2.5/en/installation/air-gap-single-node/provision-host + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/ --- In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md index 958d25fbb..1585ae7c0 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md @@ -1,6 +1,8 @@ --- title: Installing Rancher behind an HTTP Proxy weight: 4 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/ --- In many enterprise environments, servers or VMs running on-premises do not have direct Internet access, but must connect to external services through an HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md index add0d1c7a..fac08ab40 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md @@ -1,6 +1,8 @@ --- title: 3. Install Rancher weight: 300 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/ --- Now that you have a running RKE cluster, you can install Rancher in it.
For security reasons, all traffic to Rancher must be encrypted with TLS. For this tutorial, you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use case, you will likely use Let's Encrypt or provide your own certificate. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md index 8634abe76..d0b84f888 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md @@ -1,6 +1,8 @@ --- title: '2. Install Kubernetes' weight: 200 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/ --- Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md index 082dec158..3e3d9370e 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md @@ -1,6 +1,8 @@ --- title: '1. Set up Infrastructure' weight: 100 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/ --- In this section, you will provision the underlying infrastructure for your Rancher management server with internet access through an HTTP proxy. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md index fc61e42af..8b3d6e7b4 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md @@ -3,6 +3,7 @@ title: Install/Upgrade Rancher with RancherD weight: 3 aliases: - /rancher/v2.5/en/installation/install-rancher-on-linux + - /rancher/v2.x/en/installation/install-rancher-on-linux/ --- _Available as of Rancher v2.5.4_ diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md index 2e5965b94..770326f9b 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md @@ -3,6 +3,7 @@ title: RancherD Configuration Reference weight: 1 aliases: - /rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration + - /rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/ --- > RancherD is an experimental feature.
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md index 68a8b6dab..d91448677 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md @@ -3,6 +3,7 @@ title: Rollbacks weight: 3 aliases: - /rancher/v2.5/en/installation/install-rancher-on-linux/rollbacks + - /rancher/v2.x/en/installation/install-rancher-on-linux/rollbacks/ --- > RancherD is an experimental feature. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md index a57c16f1a..66f411391 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md @@ -3,6 +3,7 @@ title: Upgrades weight: 2 aliases: - /rancher/v2.5/en/installation/install-rancher-on-linux/upgrades + - /rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/ --- > RancherD is an experimental feature. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md index 14aaba604..d28ce130f 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md @@ -6,6 +6,8 @@ aliases: - /rancher/v2.5/en/installation/single-node-install/ - /rancher/v2.5/en/installation/single-node - /rancher/v2.5/en/installation/other-installation-methods/single-node + - /rancher/v2.x/en/installation/requirements/installing-docker/ + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/ --- Rancher can be installed by running a single Docker container. 
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md index ff867857d..ef4cfe2c6 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md @@ -1,6 +1,8 @@ --- title: Advanced Options for Docker Installs weight: 5 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/ --- When installing Rancher, there are several [advanced options]({{}}/rancher/v2.5/en/installation/options/) that can be enabled: diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md index d10edc10f..1bb416a90 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md @@ -4,6 +4,7 @@ weight: 251 aliases: - /rancher/v2.5/en/installation/proxy-configuration/ - /rancher/v2.5/en/installation/single-node/proxy + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/ --- If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md index 60c84fdda..11189b9dd 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md @@ -4,6 +4,7 @@ weight: 1015 aliases: - /rancher/v2.5/en/upgrades/single-node-rollbacks - /rancher/v2.5/en/upgrades/rollbacks/single-node-rollbacks + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/ --- If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades). 
Rolling back restores: diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md index bf63fce77..ce5f6c3f6 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md @@ -7,6 +7,7 @@ aliases: - /rancher/v2.5/en/upgrades/upgrades/single-node - /rancher/v2.5/en/upgrades/upgrades/single-node-upgrade/ - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/ + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/ --- The following instructions will guide you through upgrading a Rancher server that was installed with Docker. diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md index 9a1fc02ee..b2af48b98 100644 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md @@ -1,6 +1,8 @@ --- title: Certificate Troubleshooting weight: 4 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/ --- ### How Do I Know if My Certificates are in PEM Format? diff --git a/content/rancher/v2.5/en/installation/requirements/_index.md b/content/rancher/v2.5/en/installation/requirements/_index.md index 9e2a6be96..887226232 100644 --- a/content/rancher/v2.5/en/installation/requirements/_index.md +++ b/content/rancher/v2.5/en/installation/requirements/_index.md @@ -2,6 +2,8 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup weight: 1 +aliases: + - /rancher/v2.x/en/installation/requirements/ --- This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. diff --git a/content/rancher/v2.5/en/installation/requirements/ports/_index.md b/content/rancher/v2.5/en/installation/requirements/ports/_index.md index 050c28127..fb88592b0 100644 --- a/content/rancher/v2.5/en/installation/requirements/ports/_index.md +++ b/content/rancher/v2.5/en/installation/requirements/ports/_index.md @@ -2,6 +2,8 @@ title: Port Requirements description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes weight: 300 +aliases: + - /rancher/v2.x/en/installation/requirements/ports/ --- To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. 
diff --git a/content/rancher/v2.5/en/installation/resources/_index.md b/content/rancher/v2.5/en/installation/resources/_index.md index d5e06681c..5fe6f4dd3 100644 --- a/content/rancher/v2.5/en/installation/resources/_index.md +++ b/content/rancher/v2.5/en/installation/resources/_index.md @@ -2,7 +2,8 @@ title: Resources weight: 5 aliases: -- /rancher/v2.5/en/installation/options + - /rancher/v2.5/en/installation/options + - /rancher/v2.x/en/installation/resources/ --- ### Docker Installations diff --git a/content/rancher/v2.5/en/installation/resources/advanced/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/_index.md index f5e421955..76c2993f4 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/_index.md @@ -1,6 +1,8 @@ --- title: Advanced weight: 1000 +aliases: + - /rancher/v2.x/en/installation/resources/advanced/ --- The documents in this section contain resources for less common use cases. \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md index 2031bc16d..4250740a7 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md @@ -4,6 +4,7 @@ weight: 4 aliases: - /rancher/v2.5/en/installation/options/api-audit-log/ - /rancher/v2.5/en/installation/api-auditing + - /rancher/v2.x/en/installation/resources/advanced/api-audit-log/ --- You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. diff --git a/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md index 654f54f14..c18445bd4 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md @@ -3,6 +3,7 @@ title: "Running on ARM64 (Experimental)" weight: 3 aliases: - /rancher/v2.5/en/installation/options/arm64-platform + - /rancher/v2.x/en/installation/resources/advanced/arm64-platform/ --- > **Important:** diff --git a/content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md index ab4a8fd6d..4dfdf0462 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md @@ -3,6 +3,7 @@ title: Tuning etcd for Large Installations weight: 2 aliases: - /rancher/v2.5/en/installation/options/etcd + - /rancher/v2.x/en/installation/resources/advanced/etcd/ --- When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. 
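The etcd tuning hunk above recommends raising the keyspace from the 2GB default toward the 8GB maximum, but the surrounding context does not show where that setting lives. As a hedged sketch only (the exact mechanism depends on how the cluster was provisioned), an RKE `cluster.yml` can pass the corresponding flag to etcd through `extra_args`:

```yaml
# Illustrative RKE cluster.yml fragment; 6442450944 bytes is roughly 6GB,
# chosen here arbitrarily between the 2GB default and the 8GB maximum.
services:
  etcd:
    extra_args:
      quota-backend-bytes: "6442450944"
```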
diff --git a/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md index 791413826..419d03598 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md @@ -1,6 +1,8 @@ --- title: Opening Ports with firewalld weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/advanced/firewall/ --- > We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. diff --git a/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md index 07dd7d8ba..aba571407 100644 --- a/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md +++ b/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb - /rancher/v2.5/en/installation/options/single-node-install-external-lb - /rancher/v2.5/en/installation/single-node-install-external-lb + - /rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/ --- For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work in conjunction with it. diff --git a/content/rancher/v2.5/en/installation/resources/chart-options/_index.md b/content/rancher/v2.5/en/installation/resources/chart-options/_index.md index 10b887e98..600b28554 100644 --- a/content/rancher/v2.5/en/installation/resources/chart-options/_index.md +++ b/content/rancher/v2.5/en/installation/resources/chart-options/_index.md @@ -1,6 +1,8 @@ --- title: Rancher Helm Chart Options weight: 50 +aliases: + - /rancher/v2.x/en/installation/resources/chart-options/ --- The Rancher Helm chart options reference moved to [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md b/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md index 5a1f66036..9d7ec763c 100644 --- a/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md +++ b/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md @@ -3,6 +3,7 @@ title: Choosing a Rancher Version weight: 1 aliases: - /rancher/v2.5/en/installation/options/server-tags + - /rancher/v2.x/en/installation/resources/choosing-version/ --- This section describes how to choose a Rancher version.
diff --git a/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md b/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md index 9300424c9..2274e2a9a 100644 --- a/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md +++ b/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md @@ -4,6 +4,7 @@ weight: 1 aliases: - /rancher/v2.5/en/installation/options/custom-ca-root-certificate/ - /rancher/v2.5/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate + - /rancher/v2.x/en/installation/resources/custom-ca-root-certificate/ --- If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md index 83ad818ff..d134423b4 100644 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md +++ b/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md @@ -4,6 +4,7 @@ weight: 17 aliases: - /rancher/v2.5/en/installation/options/feature-flags/ - /rancher/v2.5/en/admin-settings/feature-flags/ + - /rancher/v2.x/en/installation/resources/feature-flags/ --- Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md index a7a0c8f3c..d6568c937 100644 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md +++ b/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md @@ -3,6 +3,7 @@ title: Allow Unsupported Storage Drivers weight: 1 aliases: - /rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers/ + - /rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/ --- This feature allows you to use types for storage providers and provisioners that are not enabled by default. diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md index 1268d3627..bce3973f0 100644 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md +++ b/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md @@ -3,6 +3,7 @@ title: UI for Istio Virtual Services and Destination Rules weight: 2 aliases: - /rancher/v2.5/en/installation/options/feature-flags/istio-virtual-service-ui + - /rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/ --- This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. 
diff --git a/content/rancher/v2.5/en/installation/resources/helm-version/_index.md b/content/rancher/v2.5/en/installation/resources/helm-version/_index.md index 711e575b8..2c890c7e1 100644 --- a/content/rancher/v2.5/en/installation/resources/helm-version/_index.md +++ b/content/rancher/v2.5/en/installation/resources/helm-version/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/options/helm2 - /rancher/v2.5/en/installation/options/helm2/helm-init - /rancher/v2.5/en/installation/options/helm2/helm-rancher + - /rancher/v2.x/en/installation/resources/helm-version/ --- This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md index efad852af..a6362f41b 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md @@ -1,6 +1,8 @@ --- title: "Don't have a Kubernetes cluster? Try one of these tutorials." weight: 4 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ --- This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md index 55c3ab6bf..35c8a2ed9 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md @@ -4,6 +4,7 @@ shortTitle: Set up RKE Kubernetes weight: 3 aliases: - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke/ --- diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md index a73add321..390dfe259 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md @@ -4,6 +4,7 @@ shortTitle: Set up RKE2 for Rancher weight: 2 aliases: - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2 + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/ --- _Tested on v2.5.6_ diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md index d50122143..58e4c6ce1 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md @@ -1,6 +1,8 @@ --- title: About High-availability Installations weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/ --- We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes.
diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md index 5445da135..da6f027e4 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md @@ -2,6 +2,8 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. shortTitle: Infrastructure Tutorials weight: 5 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ --- To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md index f0bb8732c..34a1c015d 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md @@ -3,6 +3,7 @@ title: Setting up Nodes in Amazon EC2 weight: 3 aliases: - /rancher/v2.5/en/installation/options/ec2-node + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ --- In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md index a4f98a9d4..1b9101cbf 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md @@ -1,6 +1,9 @@ --- title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/ + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/ --- This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. 
diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md index 4a56d2e39..685f1ba41 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md @@ -1,6 +1,8 @@ --- title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' weight: 2 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/ --- This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md index e7a456b6d..ac26cf28e 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md @@ -1,6 +1,8 @@ --- title: 'Set up Infrastructure for a High Availability RKE2 Kubernetes Cluster' weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/ --- This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md index a19eb1b7a..f1d4652df 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md @@ -3,6 +3,7 @@ title: Setting up an NGINX Load Balancer weight: 4 aliases: - /rancher/v2.5/en/installation/options/nginx + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/ --- NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md index 2d3ed5bb9..3f586b3b4 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/installation/ha/create-nodes-lb/nlb - /rancher/v2.5/en/installation/k8s-install/create-nodes-lb/nlb - /rancher/v2.5/en/installation/options/nlb + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/ --- This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. 
diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md index 99ef67787..01b3560b6 100644 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md +++ b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md @@ -3,6 +3,7 @@ title: Setting up a MySQL Database in Amazon RDS weight: 4 aliases: - /rancher/v2.5/en/installation/options/rds + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/ --- This tutorial describes how to set up a MySQL database in Amazon's RDS. diff --git a/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md b/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md index 195605dfd..eaad0ba63 100644 --- a/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md +++ b/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md - /rancher/v2.5/en/installation/options/local-system-charts + - /rancher/v2.x/en/installation/resources/local-system-charts/ --- The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. diff --git a/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md b/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md index 351bc9ed8..850856ff8 100644 --- a/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md +++ b/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md @@ -3,6 +3,7 @@ title: Adding TLS Secrets weight: 2 aliases: - /rancher/v2.5/en/installation/resources/encryption/tls-secrets/ + - /rancher/v2.x/en/installation/resources/tls-secrets/ --- Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. diff --git a/content/rancher/v2.5/en/installation/resources/tls-settings/_index.md b/content/rancher/v2.5/en/installation/resources/tls-settings/_index.md index 67d91d725..3c97f7472 100644 --- a/content/rancher/v2.5/en/installation/resources/tls-settings/_index.md +++ b/content/rancher/v2.5/en/installation/resources/tls-settings/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/installation/options/tls-settings/ - /rancher/v2.5/en/admin-settings/tls-settings - /rancher/v2.5/en/installation/resources/encryption/tls-settings + - /rancher/v2.x/en/installation/resources/tls-settings/ --- The default TLS configuration only accepts TLS 1.2 and secure TLS cipher suites. TLS 1.3 and TLS 1.3 exclusive cipher suites are not supported. 
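The Adding TLS Secrets hunk above refers to populating the `tls-rancher-ingress` secret in the `cattle-system` namespace. Purely as an illustrative sketch (the data values are placeholders for your PEM-encoded certificate and key, and the same object can equivalently be created with `kubectl create secret tls`), that secret looks roughly like this:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: tls-rancher-ingress
  namespace: cattle-system
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded certificate>   # placeholder
  tls.key: <base64-encoded private key>   # placeholder
```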
diff --git a/content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md b/content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md index 3f0d12ed1..15d88d00f 100644 --- a/content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md +++ b/content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/installation/ha/kubernetes-rke/troubleshooting - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke/troubleshooting - /rancher/v2.5/en/installation/options/troubleshooting + - /rancher/v2.x/en/installation/resources/troubleshooting/ --- This section describes how to troubleshoot an installation of Rancher on a Kubernetes cluster. diff --git a/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md b/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md index 1ff7143f3..c256f9f59 100644 --- a/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md +++ b/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md @@ -1,6 +1,8 @@ --- title: Updating a Private CA Certificate weight: 10 +aliases: + - /rancher/v2.x/en/installation/resources/update-ca-cert/ --- Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. diff --git a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md b/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md index 61def35ea..a3be656b6 100644 --- a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md +++ b/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/installation/options/upgrading-cert-manager - /rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions - /rancher/v2.5/en/installation/resources/encryption/upgrading-cert-manager + - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/ --- Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: diff --git a/content/rancher/v2.5/en/istio/_index.md b/content/rancher/v2.5/en/istio/_index.md index 96fee78c2..f947631d1 100644 --- a/content/rancher/v2.5/en/istio/_index.md +++ b/content/rancher/v2.5/en/istio/_index.md @@ -3,6 +3,8 @@ title: Istio weight: 14 aliases: - /rancher/v2.5/en/dashboard/istio + - /rancher/v2.x/en/istio/ + - /rancher/v2.x/en/istio/v2.5/ --- [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. 
diff --git a/content/rancher/v2.5/en/istio/configuration-reference/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/_index.md index 79164a8f9..9e32b645d 100644 --- a/content/rancher/v2.5/en/istio/configuration-reference/_index.md +++ b/content/rancher/v2.5/en/istio/configuration-reference/_index.md @@ -3,6 +3,7 @@ title: Configuration Options weight: 3 aliases: - /rancher/v2.5/en/istio/v2.5/configuration-reference + - /rancher/v2.x/en/istio/v2.5/configuration-reference/ --- - [Egress Support](#egress-support) diff --git a/content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md index 77f82b11b..7dd221cb4 100644 --- a/content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md +++ b/content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md @@ -3,6 +3,7 @@ title: Additional Steps for Project Network Isolation weight: 4 aliases: - /rancher/v2.5/en/istio/v2.5/configuration-reference/canal-and-project-network + - /rancher/v2.x/en/istio/v2.5/configuration-reference/canal-and-project-network/ --- In clusters where: diff --git a/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md index a98cbf8ff..48d1d317f 100644 --- a/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md +++ b/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster/enable-istio-with-psp - /rancher/v2.5/en/istio/v2.5/configuration-reference/enable-istio-with-psp + - /rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/ --- If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. 
diff --git a/content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md index ae4e17187..6e97f32ed 100644 --- a/content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md +++ b/content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md @@ -3,6 +3,7 @@ title: Additional Steps for Installing Istio on an RKE2 Cluster weight: 3 aliases: - /rancher/v2.5/en/istio/v2.5/configuration-reference/rke2 + - /rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/ --- Through the **Cluster Explorer,** when installing or upgrading Istio through **Apps & Marketplace,** diff --git a/content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md index 818267243..de8eb0860 100644 --- a/content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md +++ b/content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.5/en/istio/v2.5/configuration-reference/selectors-and-scrape - /rancher/v2.5/en/istio/setup/node-selectors + - /rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape/ --- The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false`, which enables monitoring across all namespaces by default. diff --git a/content/rancher/v2.5/en/istio/disabling-istio/_index.md b/content/rancher/v2.5/en/istio/disabling-istio/_index.md index d0fe6ec9c..3ac12ff22 100644 --- a/content/rancher/v2.5/en/istio/disabling-istio/_index.md +++ b/content/rancher/v2.5/en/istio/disabling-istio/_index.md @@ -3,6 +3,7 @@ title: Disabling Istio weight: 4 aliases: - /rancher/v2.5/en/istio/v2.5/disabling-istio + - /rancher/v2.x/en/istio/v2.5/disabling-istio/ --- This section describes how to uninstall Istio in a cluster or disable a namespace, or workload. diff --git a/content/rancher/v2.5/en/istio/rbac/_index.md b/content/rancher/v2.5/en/istio/rbac/_index.md index 9175e47d6..bfba9cbcb 100644 --- a/content/rancher/v2.5/en/istio/rbac/_index.md +++ b/content/rancher/v2.5/en/istio/rbac/_index.md @@ -4,6 +4,7 @@ weight: 3 aliases: - /rancher/v2.5/en/istio/rbac - /rancher/v2.5/en/istio/v2.5/rbac + - /rancher/v2.x/en/istio/v2.5/rbac/ --- This section describes the permissions required to access Istio features. diff --git a/content/rancher/v2.5/en/istio/release-notes/_index.md b/content/rancher/v2.5/en/istio/release-notes/_index.md index 855e663f0..5570eab14 100644 --- a/content/rancher/v2.5/en/istio/release-notes/_index.md +++ b/content/rancher/v2.5/en/istio/release-notes/_index.md @@ -3,6 +3,7 @@ title: Release Notes aliases: - /rancher/v2.5/en/istio/release-notes - /rancher/v2.5/en/istio/v2.5/release-notes + - /rancher/v2.x/en/istio/v2.5/release-notes/ --- # Istio 1.5.9 release notes diff --git a/content/rancher/v2.5/en/istio/resources/_index.md b/content/rancher/v2.5/en/istio/resources/_index.md index aaec78266..610080408 100644 --- a/content/rancher/v2.5/en/istio/resources/_index.md +++ b/content/rancher/v2.5/en/istio/resources/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/project-admin/istio/config/ - /rancher/v2.5/en/istio/resources - /rancher/v2.5/en/istio/v2.5/resources + - /rancher/v2.x/en/istio/v2.5/resources/ --- This section describes the minimum recommended computing resources for the Istio components in a cluster. 
diff --git a/content/rancher/v2.5/en/istio/setup/_index.md b/content/rancher/v2.5/en/istio/setup/_index.md index 2f590b97e..642222540 100644 --- a/content/rancher/v2.5/en/istio/setup/_index.md +++ b/content/rancher/v2.5/en/istio/setup/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.5/en/istio/setup - /rancher/v2.5/en/istio/v2.5/setup/ + - /rancher/v2.x/en/istio/v2.5/setup/ --- This section describes how to enable Istio and start using it in your projects. diff --git a/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md index 0d2027b38..2875c2cc9 100644 --- a/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md +++ b/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md @@ -4,6 +4,7 @@ weight: 4 aliases: - /rancher/v2.5/en/istio/setup/deploy-workloads - /rancher/v2.5/en/istio/v2.5/setup/deploy-workloads + - /rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/ --- > **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have the Istio app installed. diff --git a/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md index be1175394..92b8625be 100644 --- a/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md +++ b/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md @@ -4,6 +4,7 @@ weight: 1 aliases: - /rancher/v2.5/en/istio/setup/enable-istio-in-cluster - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster + - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/ --- >**Prerequisites:** diff --git a/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md index 04f868bdb..6044a807c 100644 --- a/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md +++ b/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md @@ -4,6 +4,7 @@ weight: 2 aliases: - /rancher/v2.5/en/istio/setup/enable-istio-in-namespace - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-namespace + - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/ --- You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. diff --git a/content/rancher/v2.5/en/istio/setup/gateway/_index.md b/content/rancher/v2.5/en/istio/setup/gateway/_index.md index ca85b25b3..243815152 100644 --- a/content/rancher/v2.5/en/istio/setup/gateway/_index.md +++ b/content/rancher/v2.5/en/istio/setup/gateway/_index.md @@ -4,6 +4,7 @@ weight: 5 aliases: - /rancher/v2.5/en/istio/setup/gateway - /rancher/v2.5/en/istio/v2.5/setup/gateway + - /rancher/v2.x/en/istio/v2.5/setup/gateway/ --- The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. 
diff --git a/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md b/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md index cb21962e3..c2fc4826a 100644 --- a/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md +++ b/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md @@ -4,6 +4,7 @@ weight: 6 aliases: - /rancher/v2.5/en/istio/setup/set-up-traffic-management - /rancher/v2.5/en/istio/v2.5/setup/set-up-traffic-management + - /rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/ --- A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. diff --git a/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md b/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md index 241bd837b..aea0643b8 100644 --- a/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md +++ b/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/istio/setup/view-traffic - /rancher/v2.5/en/istio/setup/view-traffic - /rancher/v2.5/en/istio/v2.5/setup/view-traffic + - /rancher/v2.x/en/istio/v2.5/setup/view-traffic/ --- This section describes how to view the traffic that is being managed by Istio. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/_index.md index f048d0f17..c1c3a0afe 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/_index.md @@ -5,6 +5,7 @@ aliases: - /rancher/v2.5/en/concepts/ - /rancher/v2.5/en/tasks/ - /rancher/v2.5/en/concepts/resources/ + - /rancher/v2.x/en/k8s-in-rancher/ --- > The Cluster Explorer is a new feature in Rancher v2.5 that allows you to view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. This section will be updated to reflect the way that Kubernetes resources are handled in Rancher v2.5. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md index e522f576a..929081f9f 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md @@ -4,7 +4,8 @@ description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS ( weight: 3060 aliases: - /rancher/v2.5/en/tasks/projects/add-ssl-certificates/ - - /rancher/v2.5/en/k8s-in-rancher/certificates + - /rancher/v2.5/en/k8s-in-rancher/certificates + - /rancher/v2.x/en/k8s-in-rancher/certificates/ --- When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. 
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md index 972d89ef3..dd80de596 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md @@ -4,6 +4,7 @@ weight: 3061 aliases: - /rancher/v2.5/en/tasks/projects/add-configmaps - /rancher/v2.5/en/k8s-in-rancher/configmaps + - /rancher/v2.x/en/k8s-in-rancher/configmaps/ --- While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index 0f2ace178..8849de51a 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -4,6 +4,7 @@ description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs weight: 3026 aliases: - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/ --- The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md index aef393f45..2b4f86ba1 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md @@ -3,6 +3,7 @@ title: Background Information on HPAs weight: 3027 aliases: - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/ --- The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. 
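The HPA pages touched above describe the feature only in prose, so a minimal manifest may help as a reference point. This is a generic sketch, not an example from the docs: the workload name and thresholds are invented, and the API version to use depends on the cluster's Kubernetes version.

```yaml
# Scales a hypothetical "web" Deployment between 2 and 5 replicas on CPU usage.
# Kubernetes 1.23+ serves this as autoscaling/v2; older clusters use
# autoscaling/v2beta2 with the same fields.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web                      # hypothetical workload name
  minReplicas: 2
  maxReplicas: 5
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80   # scale out above 80% average CPU
```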
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md index cff8260c0..d1e3900f3 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md @@ -3,6 +3,7 @@ title: Managing HPAs with kubectl weight: 3029 aliases: - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/ --- This section describes HPA management with `kubectl`. This document has instructions for how to: diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md index 7de4240a9..5c15feb07 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md @@ -3,6 +3,7 @@ title: Managing HPAs with the Rancher UI weight: 3028 aliases: - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/ --- The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md index 28dd9c5e1..0816641dd 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md @@ -1,9 +1,9 @@ --- title: Testing HPAs with kubectl weight: 3031 - aliases: - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/ --- This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md index ae0b9946a..2f5cd7cee 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md @@ -4,6 +4,7 @@ description: Learn how you can set up load balancers and ingress controllers to weight: 3040 aliases: - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ --- Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. 
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md index a9d53f025..dc700e5fc 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md @@ -5,6 +5,7 @@ weight: 3042 aliases: - /rancher/v2.5/en/tasks/workloads/add-ingress/ - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/ --- Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md index e5198a315..d30335a67 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md @@ -5,6 +5,7 @@ weight: 3041 aliases: - /rancher/v2.5/en/concepts/load-balancing/ - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/ --- Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md index 15e226ea4..e4f22b37d 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/tasks/projects/add-registries/ - /rancher/v2.5/en/k8s-in-rancher/registries - /rancher/v2.5/en/k8s-resources/k8s-in-rancher/registries + - /rancher/v2.x/en/k8s-in-rancher/registries/ --- Registries are Kubernetes secrets containing credentials used to authenticate with [private Docker registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). diff --git a/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md index ca4ea2c8f..5d5767eec 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md @@ -3,7 +3,8 @@ title: Secrets weight: 3062 aliases: - /rancher/v2.5/en/tasks/projects/add-a-secret - - /rancher/v2.5/en/k8s-in-rancher/secrets + - /rancher/v2.5/en/k8s-in-rancher/secrets + - /rancher/v2.x/en/k8s-in-rancher/secrets/ --- [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. 
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md index 9546aa818..f6f6204af 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md @@ -4,6 +4,7 @@ weight: 3045 aliases: - /rancher/v2.5/en/tasks/workloads/add-a-dns-record/ - /rancher/v2.5/en/k8s-in-rancher/service-discovery + - /rancher/v2.x/en/k8s-in-rancher/service-discovery/ --- For every workload created, a complementing Service Discovery entry is created. This Service Discovery entry enables DNS resolution for the workload's pods using the following naming convention: diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md index a4ae02e73..83b5147b3 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/concepts/workloads/ - /rancher/v2.5/en/tasks/workloads/ - /rancher/v2.5/en/k8s-in-rancher/workloads + - /rancher/v2.x/en/k8s-in-rancher/workloads/ --- You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md index 3d1889b91..807020f2c 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md @@ -4,6 +4,7 @@ weight: 3029 aliases: - /rancher/v2.5/en/tasks/workloads/add-a-sidecar/ - /rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar + - /rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/ --- A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md index fa184d733..c27ebdcc9 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md @@ -5,6 +5,7 @@ weight: 3026 aliases: - /rancher/v2.5/en/tasks/workloads/deploy-workloads/ - /rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads + - /rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/ --- Deploy a workload to run an application in one or more containers. 
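The workload hunks above discuss pods, workloads, and services in general terms; for orientation, this is roughly what a minimal Deployment of the kind those pages drive through the Rancher UI looks like. It is a generic sketch with arbitrary names and image, not an example taken from the docs:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-web                  # arbitrary workload name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-web
  template:
    metadata:
      labels:
        app: hello-web
    spec:
      containers:
        - name: hello-web
          image: nginx:1.21        # any container image works here
          ports:
            - containerPort: 80    # exposed via a Service or Ingress as described above
```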
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md index 4b13395a9..794b9969f 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md @@ -4,6 +4,7 @@ weight: 3027 aliases: - /rancher/v2.5/en/tasks/workloads/rollback-workloads/ - /rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads + - /rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/ --- Sometimes there is a need to rollback to the previous version of the application, either for debugging purposes or because an upgrade did not go as planned. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md index 0457f3324..5ebbe02d5 100644 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md +++ b/content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md @@ -4,6 +4,7 @@ weight: 3028 aliases: - /rancher/v2.5/en/tasks/workloads/upgrade-workloads/ - /rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads + - /rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/ --- When a new version of an application image is released on Docker Hub, you can upgrade any workloads running a previous version of the application to the new one. diff --git a/content/rancher/v2.5/en/logging/_index.md b/content/rancher/v2.5/en/logging/_index.md index 3d7ff5feb..df4bdc8a9 100644 --- a/content/rancher/v2.5/en/logging/_index.md +++ b/content/rancher/v2.5/en/logging/_index.md @@ -8,6 +8,8 @@ aliases: - /rancher/v2.5/en/dashboard/logging - /rancher/v2.5/en/logging/v2.5 - /rancher/v2.5/en/cluster-admin/tools/logging + - /rancher/v2.x/en/logging/ + - /rancher/v2.x/en/logging/v2.5/ --- The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. diff --git a/content/rancher/v2.5/en/logging/migrating/_index.md b/content/rancher/v2.5/en/logging/migrating/_index.md index 0f05903b4..b648cb27f 100644 --- a/content/rancher/v2.5/en/logging/migrating/_index.md +++ b/content/rancher/v2.5/en/logging/migrating/_index.md @@ -3,6 +3,7 @@ title: Migrating to Rancher v2.5 Logging weight: 2 aliases: - /rancher/v2.5/en/logging/v2.5/migrating + - /rancher/v2.x/en/logging/v2.5/migrating/ --- Starting in v2.5, the logging feature available within Rancher has been completely overhauled. The [logging operator](https://github.com/banzaicloud/logging-operator) from Banzai Cloud has been adopted; Rancher configures this tooling for use when deploying logging. diff --git a/content/rancher/v2.5/en/longhorn/_index.md b/content/rancher/v2.5/en/longhorn/_index.md index 5ba7a59de..2fc1fe769 100644 --- a/content/rancher/v2.5/en/longhorn/_index.md +++ b/content/rancher/v2.5/en/longhorn/_index.md @@ -2,6 +2,8 @@ title: Longhorn - Cloud native distributed block storage for Kubernetes shortTitle: Longhorn Storage weight: 19 +aliases: + - /rancher/v2.x/en/longhorn/ --- [Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. 
diff --git a/content/rancher/v2.5/en/monitoring-alerting/_index.md b/content/rancher/v2.5/en/monitoring-alerting/_index.md index bfc64e387..268b3dc85 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/_index.md @@ -3,6 +3,9 @@ title: Monitoring and Alerting shortTitle: Monitoring/Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring weight: 13 +aliases: + - /rancher/v2.x/en/monitoring-alerting/ + - /rancher/v2.x/en/monitoring-alerting/v2.5/ --- Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md index 5004adfbe..e56978fbb 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md @@ -3,6 +3,8 @@ title: Configuration weight: 5 aliases: - /rancher/v2.5/en/monitoring-alerting/configuration + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/ + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/ --- This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md index 2a9b6d64e..4cd25e86c 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md @@ -1,6 +1,8 @@ --- title: Configuring PrometheusRules weight: 3 +aliases: + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/ --- A PrometheusRule defines a group of Prometheus alerting and/or recording rules. diff --git a/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md b/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md index 072c0f104..5b170407e 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md @@ -7,6 +7,7 @@ aliases: - /rancher/v2.5/en/monitoring-alerting/expression - /rancher/v2.5/en/monitoring-alerting/configuration/expression - /rancher/v2.5/en/monitoring/alerting/configuration/expression + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/ --- The PromQL expressions in this doc can be used to configure alerts. diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md index 7a79aafc3..552b54687 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md @@ -3,6 +3,7 @@ title: Migrating to Rancher v2.5 Monitoring weight: 9 aliases: - /rancher/v2.5/en/monitoring-alerting/migrating + - /rancher/v2.x/en/monitoring-alerting/v2.5/migrating/ --- If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no automatic upgrade path for switching to the new monitoring/alerting solution. 
Before deploying the new monitoring solution via Cluster Explorer, you will need to disable and remove all existing custom alerts, notifiers and monitoring installations for the whole cluster and in all projects. diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md index 9bf0a5485..40fa07ee3 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md @@ -3,6 +3,7 @@ title: Persistent Grafana Dashboards weight: 6 aliases: - /rancher/v2.5/en/monitoring-alerting/persist-grafana + - /rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/ --- To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. diff --git a/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md b/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md index 3a13f1fc5..4c703ba37 100644 --- a/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md +++ b/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/cluster-admin/tools/monitoring/rbac - /rancher/v2.5/en/monitoring-alerting/rbac - /rancher/v2.5/en/monitoring-alerting/grafana + - /rancher/v2.x/en/monitoring-alerting/v2.5/rbac/ --- This section describes the expectations for RBAC for Rancher Monitoring. diff --git a/content/rancher/v2.5/en/opa-gatekeper/_index.md b/content/rancher/v2.5/en/opa-gatekeper/_index.md index 86136b6fa..e868c51b5 100644 --- a/content/rancher/v2.5/en/opa-gatekeper/_index.md +++ b/content/rancher/v2.5/en/opa-gatekeper/_index.md @@ -4,6 +4,7 @@ weight: 16 aliases: - /rancher/v2.5/en/cluster-admin/tools/opa-gatekeeper - /rancher/v2.5/en/opa-gatekeeper/Open%20Policy%20Agent + - /rancher/v2.x/en/opa-gatekeper/ --- To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. [OPA (Open Policy Agent)](https://www.openpolicyagent.org/) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. diff --git a/content/rancher/v2.5/en/overview/_index.md b/content/rancher/v2.5/en/overview/_index.md index b59a4ae67..dea44182e 100644 --- a/content/rancher/v2.5/en/overview/_index.md +++ b/content/rancher/v2.5/en/overview/_index.md @@ -1,6 +1,8 @@ --- title: Overview weight: 1 +aliases: + - /rancher/v2.x/en/overview/ --- Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. 
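The persistent Grafana dashboards page touched above recommends putting the dashboard JSON into a ConfigMap so that it survives Grafana restarts and can live under version control. A minimal sketch of such a ConfigMap follows; the `cattle-dashboards` namespace and the `grafana_dashboard: "1"` label follow the common Grafana sidecar convention and are assumptions here rather than values stated in this diff:

```yaml
# Sketch of a ConfigMap carrying a Grafana dashboard definition.
# Namespace and label are assumptions based on the usual Grafana sidecar convention.
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-dashboard
  namespace: cattle-dashboards
  labels:
    grafana_dashboard: "1"
data:
  example-dashboard.json: |
    {
      "title": "Example Dashboard",
      "panels": []
    }
```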
diff --git a/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md index 30fdd1ab0..a2aa6faf9 100644 --- a/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md +++ b/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md @@ -1,6 +1,8 @@ --- title: Architecture Recommendations weight: 3 +aliases: + - /rancher/v2.x/en/overview/architecture-recommendations/ --- Kubernetes cluster. If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) diff --git a/content/rancher/v2.5/en/overview/architecture/_index.md b/content/rancher/v2.5/en/overview/architecture/_index.md index 6ec1dfd58..9e145d0a6 100644 --- a/content/rancher/v2.5/en/overview/architecture/_index.md +++ b/content/rancher/v2.5/en/overview/architecture/_index.md @@ -1,6 +1,8 @@ --- title: Architecture weight: 1 +aliases: + - /rancher/v2.x/en/overview/architecture/ --- This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. diff --git a/content/rancher/v2.5/en/overview/concepts/_index.md b/content/rancher/v2.5/en/overview/concepts/_index.md index 0fe2f4e91..4e9d2d89e 100644 --- a/content/rancher/v2.5/en/overview/concepts/_index.md +++ b/content/rancher/v2.5/en/overview/concepts/_index.md @@ -1,6 +1,8 @@ --- title: Kubernetes Concepts weight: 4 +aliases: + - /rancher/v2.x/en/overview/concepts/ --- This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified overview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) diff --git a/content/rancher/v2.5/en/pipelines/_index.md b/content/rancher/v2.5/en/pipelines/_index.md index 31b195a77..a47fdaf75 100644 --- a/content/rancher/v2.5/en/pipelines/_index.md +++ b/content/rancher/v2.5/en/pipelines/_index.md @@ -3,6 +3,7 @@ title: Pipelines weight: 10 aliases: - /rancher/v2.5/en/k8s-in-rancher/pipelines + - /rancher/v2.x/en/pipelines/ --- > As of Rancher v2.5, Git-based deployment pipelines are now recommended to be handled with Rancher Continuous Delivery powered by [Fleet,]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet) available in Cluster Explorer. diff --git a/content/rancher/v2.5/en/pipelines/concepts/_index.md b/content/rancher/v2.5/en/pipelines/concepts/_index.md index 46b5a1800..40285989b 100644 --- a/content/rancher/v2.5/en/pipelines/concepts/_index.md +++ b/content/rancher/v2.5/en/pipelines/concepts/_index.md @@ -3,6 +3,7 @@ title: Concepts weight: 1 aliases: - /rancher/v2.5/en/k8s-in-rancher/pipelines/concepts + - /rancher/v2.x/en/pipelines/concepts/ --- The purpose of this page is to explain common concepts and terminology related to pipelines. 
diff --git a/content/rancher/v2.5/en/pipelines/config/_index.md b/content/rancher/v2.5/en/pipelines/config/_index.md index 75bb1cbea..7add0b7d9 100644 --- a/content/rancher/v2.5/en/pipelines/config/_index.md +++ b/content/rancher/v2.5/en/pipelines/config/_index.md @@ -3,6 +3,7 @@ title: Pipeline Configuration Reference weight: 1 aliases: - /rancher/v2.5/en/k8s-in-rancher/pipelines/config + - /rancher/v2.x/en/pipelines/config/ --- In this section, you'll learn how to configure pipelines. diff --git a/content/rancher/v2.5/en/pipelines/example-repos/_index.md b/content/rancher/v2.5/en/pipelines/example-repos/_index.md index 5ce002a9c..ccc66147b 100644 --- a/content/rancher/v2.5/en/pipelines/example-repos/_index.md +++ b/content/rancher/v2.5/en/pipelines/example-repos/_index.md @@ -3,7 +3,8 @@ title: Example Repositories weight: 500 aliases: - /rancher/v2.5/en/tools/pipelines/quick-start-guide/ - - /rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos + - /rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos + - /rancher/v2.x/en/pipelines/example-repos/ --- Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: diff --git a/content/rancher/v2.5/en/pipelines/example/_index.md b/content/rancher/v2.5/en/pipelines/example/_index.md index a793d4a46..796e29041 100644 --- a/content/rancher/v2.5/en/pipelines/example/_index.md +++ b/content/rancher/v2.5/en/pipelines/example/_index.md @@ -4,6 +4,7 @@ weight: 501 aliases: - /rancher/v2.5/en/tools/pipelines/reference/ - /rancher/v2.5/en/k8s-in-rancher/pipelines/example + - /rancher/v2.x/en/pipelines/example/ --- Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. diff --git a/content/rancher/v2.5/en/pipelines/storage/_index.md b/content/rancher/v2.5/en/pipelines/storage/_index.md index d01833de7..f5eb987d4 100644 --- a/content/rancher/v2.5/en/pipelines/storage/_index.md +++ b/content/rancher/v2.5/en/pipelines/storage/_index.md @@ -3,6 +3,7 @@ title: Configuring Persistent Data for Pipeline Components weight: 600 aliases: - /rancher/v2.5/en/k8s-in-rancher/pipelines/storage + - /rancher/v2.x/en/pipelines/storage/ --- The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. diff --git a/content/rancher/v2.5/en/project-admin/_index.md b/content/rancher/v2.5/en/project-admin/_index.md index ff000ecd2..f8ecaa0b6 100644 --- a/content/rancher/v2.5/en/project-admin/_index.md +++ b/content/rancher/v2.5/en/project-admin/_index.md @@ -3,6 +3,7 @@ title: Project Administration weight: 9 aliases: - /rancher/v2.5/en/project-admin/editing-projects/ + - /rancher/v2.x/en/project-admin/ --- _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. 
You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. diff --git a/content/rancher/v2.5/en/project-admin/namespaces/_index.md b/content/rancher/v2.5/en/project-admin/namespaces/_index.md index f04807483..f07a9c69b 100644 --- a/content/rancher/v2.5/en/project-admin/namespaces/_index.md +++ b/content/rancher/v2.5/en/project-admin/namespaces/_index.md @@ -1,6 +1,8 @@ --- title: Namespaces weight: 2520 +aliases: + - /rancher/v2.x/en/project-admin/namespaces/ --- Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. diff --git a/content/rancher/v2.5/en/project-admin/pipelines/_index.md b/content/rancher/v2.5/en/project-admin/pipelines/_index.md index 19bf74326..b7d5e26df 100644 --- a/content/rancher/v2.5/en/project-admin/pipelines/_index.md +++ b/content/rancher/v2.5/en/project-admin/pipelines/_index.md @@ -6,6 +6,7 @@ aliases: - /rancher/v2.5/en/concepts/ci-cd-pipelines/ - /rancher/v2.5/en/tasks/pipelines/ - /rancher/v2.5/en/tools/pipelines/configurations/ + - /rancher/v2.x/en/project-admin/pipelines/ --- Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. diff --git a/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md index dbed390d6..ddc2483a7 100644 --- a/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md +++ b/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md @@ -1,6 +1,8 @@ --- title: Pod Security Policies weight: 5600 +aliases: + - /rancher/v2.x/en/project-admin/pod-security-policies/ --- > These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). diff --git a/content/rancher/v2.5/en/project-admin/project-members/_index.md b/content/rancher/v2.5/en/project-admin/project-members/_index.md index 67d487cf1..a8476db8e 100644 --- a/content/rancher/v2.5/en/project-admin/project-members/_index.md +++ b/content/rancher/v2.5/en/project-admin/project-members/_index.md @@ -3,7 +3,8 @@ title: Adding Users to Projects weight: 2505 aliases: - /rancher/v2.5/en/tasks/projects/add-project-members/ - - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/project-members/ + - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/project-members + - /rancher/v2.x/en/project-admin/project-members/ --- If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. 
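The pipeline pages updated above note that a pipeline can be configured either in the UI or with a `.rancher-pipeline.yml` file kept in the repository. The sketch below shows the general shape of such a file under that assumption; the stage names, images, and tags are placeholders:

```yaml
# .rancher-pipeline.yml sketch: one build stage and one image publish stage.
# Image names, scripts, and tags are placeholders.
stages:
  - name: Build
    steps:
      - runScriptConfig:
          image: golang:1.13
          shellScript: go build ./...
  - name: Publish
    steps:
      - publishImageConfig:
          dockerfilePath: ./Dockerfile
          buildContext: .
          tag: example/app:v1.0
```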
diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md index a0b948feb..77f54f689 100644 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md +++ b/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md @@ -3,6 +3,7 @@ title: Project Resource Quotas weight: 2515 aliases: - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas + - /rancher/v2.x/en/project-admin/resource-quotas/ --- In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md index 027506e48..b92fa7e37 100644 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md +++ b/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md @@ -1,6 +1,8 @@ --- title: Setting Container Default Resource Limits weight: 3 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/override-container-default/ --- When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md index ffe030b88..a3ce9a6af 100644 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md +++ b/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md @@ -1,6 +1,8 @@ --- title: Overriding the Default Limit for a Namespace weight: 2 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/ --- Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md index 7e5f66ed3..c3d20c529 100644 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md +++ b/content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md @@ -1,6 +1,8 @@ --- title: Resource Quota Type Reference weight: 4 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/ --- When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. 
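The resource quota pages above cover limiting the resources available to a project or namespace and setting container default limits. Rancher manages these at the project level, but they ultimately map onto standard Kubernetes objects; a rough sketch of the native per-namespace equivalents, with illustrative values only, is:

```yaml
# Illustrative native equivalents of a namespace-level quota and container defaults.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: example-quota
  namespace: example-ns
spec:
  hard:
    requests.cpu: "2"
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi
    pods: "20"
---
apiVersion: v1
kind: LimitRange
metadata:
  name: example-container-defaults
  namespace: example-ns
spec:
  limits:
    - type: Container
      default:          # applied when a container sets no limit
        cpu: 500m
        memory: 256Mi
      defaultRequest:   # applied when a container sets no request
        cpu: 250m
        memory: 128Mi
```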
diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md index 63a18ba0f..45ee11e39 100644 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md +++ b/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md @@ -1,6 +1,8 @@ --- title: How Resource Quotas Work in Rancher Projects weight: 1 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/ --- Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. diff --git a/content/rancher/v2.5/en/project-admin/tools/_index.md b/content/rancher/v2.5/en/project-admin/tools/_index.md index 08b2fce4b..568b142cb 100644 --- a/content/rancher/v2.5/en/project-admin/tools/_index.md +++ b/content/rancher/v2.5/en/project-admin/tools/_index.md @@ -1,6 +1,8 @@ --- title: Tools for Logging, Monitoring, and Visibility weight: 2525 +aliases: + - /rancher/v2.x/en/project-admin/tools/ --- Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: diff --git a/content/rancher/v2.5/en/quick-start-guide/_index.md b/content/rancher/v2.5/en/quick-start-guide/_index.md index f8ee9eb6a..13ea050b0 100644 --- a/content/rancher/v2.5/en/quick-start-guide/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/_index.md @@ -3,6 +3,8 @@ title: Rancher Deployment Quick Start Guides metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. weight: 2 +aliases: + - /rancher/v2.x/en/quick-start-guide/ --- >**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). diff --git a/content/rancher/v2.5/en/quick-start-guide/cli/_index.md b/content/rancher/v2.5/en/quick-start-guide/cli/_index.md index 319967f34..954e27f6a 100644 --- a/content/rancher/v2.5/en/quick-start-guide/cli/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/cli/_index.md @@ -1,6 +1,8 @@ --- title: CLI with Rancher weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/cli/ --- Interact with Rancher using command line interface (CLI) tools from your workstation. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md index f7d4da476..b11ac98a1 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md @@ -1,6 +1,8 @@ --- title: Deploying Rancher Server weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/ --- Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. 
diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md index 56bc76e66..c5f189a95 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md @@ -2,6 +2,8 @@ title: Rancher AWS Quick Start Guide description: Read this step by step Rancher AWS guide to quickly deploy a Rancher Server with a single node cluster attached. weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/ --- The following steps will quickly deploy a Rancher Server on AWS with a single node cluster attached. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md index 108b5dcb0..2e3e94777 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md @@ -2,6 +2,8 @@ title: Rancher DigitalOcean Quick Start Guide description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher Server with a single node cluster attached. weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/ --- The following steps will quickly deploy a Rancher Server on DigitalOcean with a single node cluster attached. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md index 7f7174aa0..48eaec3b4 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md @@ -2,6 +2,8 @@ title: Rancher GCP Quick Start Guide description: Read this step by step Rancher GCP guide to quickly deploy a Rancher Server with a single node cluster attached. weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/google-gcp-qs/ --- The following steps will quickly deploy a Rancher server on GCP in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md index 2c8605a37..a77fede61 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md @@ -2,6 +2,8 @@ title: Rancher Azure Quick Start Guide description: Read this step by step Rancher Azure guide to quickly deploy a Rancher Server with a single node cluster attached. weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/microsoft-azure-qs/ --- The following steps will quickly deploy a Rancher server on Azure in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. 
diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md index 83e895694..770940ff9 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md @@ -1,6 +1,8 @@ --- title: Manual Quick Start weight: 300 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/ --- Howdy Partner! This tutorial walks you through: diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md index 664f4bb5b..b17564540 100644 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md @@ -1,6 +1,8 @@ --- title: Vagrant Quick Start weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/ --- The following steps quickly deploy a Rancher Server with a single node cluster attached. diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/_index.md index a3be7493b..62df76b39 100644 --- a/content/rancher/v2.5/en/quick-start-guide/workload/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/workload/_index.md @@ -1,6 +1,8 @@ --- title: Deploying Workloads weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/ --- These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. 
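The workload quick starts introduced above deploy an application and then expose it outside the cluster, for example with a NodePort. Independent of the Rancher UI steps those guides use, the NodePort variant boils down to roughly the following manifests (image, names, and port values are placeholders):

```yaml
# Sketch: an nginx Deployment exposed outside the cluster on a NodePort.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hello-nginx
  template:
    metadata:
      labels:
        app: hello-nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.21
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: hello-nginx
spec:
  type: NodePort
  selector:
    app: hello-nginx
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30080   # must fall in the cluster's NodePort range (default 30000-32767)
```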
diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md index 546b18ad1..cf197acad 100644 --- a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md @@ -1,6 +1,8 @@ --- title: Workload with Ingress Quick Start weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/ --- ### Prerequisite diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md index 6defc26b8..9984dc2be 100644 --- a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md @@ -1,6 +1,8 @@ --- title: Workload with NodePort Quick Start weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/ --- ### Prerequisite diff --git a/content/rancher/v2.5/en/security/_index.md b/content/rancher/v2.5/en/security/_index.md index 8586f78b9..72707dc1f 100644 --- a/content/rancher/v2.5/en/security/_index.md +++ b/content/rancher/v2.5/en/security/_index.md @@ -1,6 +1,9 @@ --- title: Security weight: 20 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/ + - /rancher/v2.x/en/security/ --- diff --git a/content/rancher/v2.5/en/security/cve/_index.md b/content/rancher/v2.5/en/security/cve/_index.md index 7b7b61ece..a8d6ac420 100644 --- a/content/rancher/v2.5/en/security/cve/_index.md +++ b/content/rancher/v2.5/en/security/cve/_index.md @@ -1,6 +1,8 @@ --- title: Rancher CVEs and Resolutions weight: 300 +aliases: + - /rancher/v2.x/en/security/cve/ --- Rancher is committed to informing the community of security issues in our products. Rancher will publish CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md index 463446b78..02b34e42f 100644 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md +++ b/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md @@ -1,6 +1,8 @@ --- title: CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5 weight: 201 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/ --- ### CIS v1.5 Kubernetes Benchmark - Rancher v2.5 with Kubernetes v1.15 diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md index 13e8edb74..26907ab28 100644 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md +++ b/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md @@ -1,6 +1,8 @@ --- title: Hardening Guide with CIS 1.5 Benchmark weight: 200 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/ --- This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5. 
It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md index d7803779e..57b65e5b0 100644 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md +++ b/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md @@ -1,6 +1,8 @@ --- title: CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4 weight: 101 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/ --- ### CIS 1.6 Kubernetes Benchmark - Rancher v2.5.4 with Kubernetes v1.18 diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md index b504be806..3836853aa 100644 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md +++ b/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md @@ -1,6 +1,8 @@ --- title: Hardening Guide with CIS 1.6 Benchmark weight: 100 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/ --- This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). diff --git a/content/rancher/v2.5/en/system-tools/_index.md b/content/rancher/v2.5/en/system-tools/_index.md index 2daccc1de..87db00915 100644 --- a/content/rancher/v2.5/en/system-tools/_index.md +++ b/content/rancher/v2.5/en/system-tools/_index.md @@ -1,6 +1,8 @@ --- title: System Tools weight: 22 +aliases: + - /rancher/v2.x/en/system-tools/ --- System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) The tasks include: diff --git a/content/rancher/v2.5/en/troubleshooting/_index.md b/content/rancher/v2.5/en/troubleshooting/_index.md index bb761060c..7fbcdd401 100644 --- a/content/rancher/v2.5/en/troubleshooting/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/_index.md @@ -1,6 +1,8 @@ --- title: Troubleshooting weight: 26 +aliases: + - /rancher/v2.x/en/troubleshooting/ --- This section contains information to help you troubleshoot issues when using Rancher. diff --git a/content/rancher/v2.5/en/troubleshooting/dns/_index.md b/content/rancher/v2.5/en/troubleshooting/dns/_index.md index c7834da7c..90892ee26 100644 --- a/content/rancher/v2.5/en/troubleshooting/dns/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/dns/_index.md @@ -1,6 +1,8 @@ --- title: DNS weight: 103 +aliases: + - /rancher/v2.x/en/troubleshooting/dns/ --- The commands/steps listed on this page can be used to check name resolution issues in your cluster. 
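The DNS troubleshooting page touched above deals with checking name resolution inside the cluster. One way to run such a check, loosely equivalent to the busybox-style lookups commonly used for this (and an assumption rather than a command copied from that page), is a throwaway pod:

```yaml
# Throwaway pod that tests in-cluster DNS by resolving the kubernetes.default service.
# Read its logs to see the lookup result, then delete it.
apiVersion: v1
kind: Pod
metadata:
  name: dns-check
spec:
  restartPolicy: Never
  containers:
    - name: dns-check
      image: busybox:1.28
      command: ["nslookup", "kubernetes.default"]
```

Reading the pod's logs shows whether `kubernetes.default` resolves; delete the pod afterwards.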
diff --git a/content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md b/content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md index 9a07ea6e9..533771973 100644 --- a/content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md @@ -1,6 +1,8 @@ --- title: Registered clusters weight: 105 +aliases: + - /rancher/v2.x/en/troubleshooting/imported-clusters/ --- The commands/steps listed on this page can be used to check clusters that you are registering or that are registered in Rancher. diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md index a92e51f76..7e935b42d 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md @@ -1,6 +1,8 @@ --- title: Kubernetes Components weight: 100 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/ --- The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters. diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md index d4d7d347e..0508f71fb 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md @@ -1,6 +1,8 @@ --- title: Troubleshooting Controlplane Nodes weight: 2 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/ --- This section applies to nodes with the `controlplane` role. diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md index f83d241a0..593fb2f4b 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md @@ -1,6 +1,8 @@ --- title: Troubleshooting etcd Nodes weight: 1 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/ --- This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md index 70505e962..7f9a61ba1 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md @@ -1,6 +1,8 @@ --- title: Troubleshooting nginx-proxy weight: 3 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/ --- The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. 
diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md index 28ee4499b..133d45242 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md @@ -1,6 +1,8 @@ --- title: Troubleshooting Worker Nodes and Generic Components weight: 4 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/ --- This section applies to every node as it includes components that run on nodes with any role. diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md index f32e0ed0c..1853ef057 100644 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md @@ -1,6 +1,8 @@ --- title: Kubernetes resources weight: 101 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-resources/ --- The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters. diff --git a/content/rancher/v2.5/en/troubleshooting/logging/_index.md b/content/rancher/v2.5/en/troubleshooting/logging/_index.md index 0c038f81e..630554fff 100644 --- a/content/rancher/v2.5/en/troubleshooting/logging/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/logging/_index.md @@ -1,6 +1,8 @@ --- title: Logging weight: 110 +aliases: + - /rancher/v2.x/en/troubleshooting/logging/ --- The following log levels are used in Rancher: diff --git a/content/rancher/v2.5/en/troubleshooting/networking/_index.md b/content/rancher/v2.5/en/troubleshooting/networking/_index.md index 9979ef2e2..1a27333ba 100644 --- a/content/rancher/v2.5/en/troubleshooting/networking/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/networking/_index.md @@ -1,6 +1,8 @@ --- title: Networking weight: 102 +aliases: + - /rancher/v2.x/en/troubleshooting/networking/ --- The commands/steps listed on this page can be used to check networking related issues in your cluster. diff --git a/content/rancher/v2.5/en/troubleshooting/rancherha/_index.md b/content/rancher/v2.5/en/troubleshooting/rancherha/_index.md index a30b664c9..610a0b16c 100644 --- a/content/rancher/v2.5/en/troubleshooting/rancherha/_index.md +++ b/content/rancher/v2.5/en/troubleshooting/rancherha/_index.md @@ -1,6 +1,8 @@ --- title: Rancher HA weight: 104 +aliases: + - /rancher/v2.x/en/troubleshooting/rancherha/ --- The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. diff --git a/content/rancher/v2.5/en/user-settings/_index.md b/content/rancher/v2.5/en/user-settings/_index.md index 6d47ef975..f8ed1a7c7 100644 --- a/content/rancher/v2.5/en/user-settings/_index.md +++ b/content/rancher/v2.5/en/user-settings/_index.md @@ -3,6 +3,7 @@ title: User Settings weight: 23 aliases: - /rancher/v2.5/en/tasks/user-settings/ + - /rancher/v2.x/en/user-settings/ --- Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. 
diff --git a/content/rancher/v2.5/en/user-settings/api-keys/_index.md b/content/rancher/v2.5/en/user-settings/api-keys/_index.md index eac95192d..555a99ad0 100644 --- a/content/rancher/v2.5/en/user-settings/api-keys/_index.md +++ b/content/rancher/v2.5/en/user-settings/api-keys/_index.md @@ -4,6 +4,7 @@ weight: 7005 aliases: - /rancher/v2.5/en/concepts/api-keys/ - /rancher/v2.5/en/tasks/user-settings/api-keys/ + - /rancher/v2.x/en/user-settings/api-keys/ --- ## API Keys and User Authentication diff --git a/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md b/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md index 2af7a7374..1c7847e8f 100644 --- a/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md +++ b/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md @@ -1,6 +1,8 @@ --- title: Managing Cloud Credentials weight: 7011 +aliases: + - /rancher/v2.x/en/user-settings/cloud-credentials/ --- When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. diff --git a/content/rancher/v2.5/en/user-settings/node-templates/_index.md b/content/rancher/v2.5/en/user-settings/node-templates/_index.md index 03546f4b6..b33e05d26 100644 --- a/content/rancher/v2.5/en/user-settings/node-templates/_index.md +++ b/content/rancher/v2.5/en/user-settings/node-templates/_index.md @@ -1,6 +1,8 @@ --- title: Managing Node Templates weight: 7010 +aliases: + - /rancher/v2.x/en/user-settings/node-templates/ --- When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: diff --git a/content/rancher/v2.5/en/user-settings/preferences/_index.md b/content/rancher/v2.5/en/user-settings/preferences/_index.md index 8776329a6..a692f70e7 100644 --- a/content/rancher/v2.5/en/user-settings/preferences/_index.md +++ b/content/rancher/v2.5/en/user-settings/preferences/_index.md @@ -1,6 +1,8 @@ --- title: User Preferences weight: 7012 +aliases: + - /rancher/v2.x/en/user-settings/preferences/ --- Each user can choose preferences to personalize their Rancher experience. To change preference settings, open the **User Settings** menu and then select **Preferences**. 
diff --git a/content/rancher/v2.x/_index.md b/content/rancher/v2.x/_index.md deleted file mode 100644 index 462704d4c..000000000 --- a/content/rancher/v2.x/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: v2.x -weight: 2 -showBreadcrumb: false ---- diff --git a/content/rancher/v2.x/en/_index.md b/content/rancher/v2.x/en/_index.md deleted file mode 100644 index 201c6f0de..000000000 --- a/content/rancher/v2.x/en/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Pre-Versioned Docs from 2.0-2.5.6 (Formerly 2.x)" -shortTitle: "Rancher 2.5-2.5.6" -description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -metaTitle: "Rancher 2.x Docs: What is New?" -metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -insertOneSix: false -weight: 2 -ctaBanner: 0 ---- - -> We are transitioning to versioned documentation. The Rancher v2.5 docs are [here.]({{}}/rancher/v2.5/en/) The Rancher v2.0-v2.4 docs are [here.]({{}}/rancher/v2.0-v2.4/en/) We recommend using the versioned docs because they are easier to read and navigate. -> -> This section, also called 2.x, contains information for versions v2.0-2.5.6. This section is still on the Rancher website so that search results won't return 404 errors, but it will no longer be maintained. - -Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher 2.x exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. - -Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. - -One Rancher server installation can manage thousands of Kubernetes clusters and thousands of nodes from the same user interface. - -Rancher adds significant value on top of Kubernetes, first by centralizing authentication and role-based access control (RBAC) for all of the clusters, giving global admins the ability to control cluster access from one location. - -It then enables detailed monitoring and alerting for clusters and their resources, ships logs to external providers, and integrates directly with Helm via the Application Catalog. If you have an external CI/CD system, you can plug it into Rancher, but if you don't, Rancher even includes [Fleet](http://fleet.rancher.io/) to help you automatically deploy and upgrade workloads. - -Rancher is a _complete_ container management platform for Kubernetes, giving you the tools to successfully run Kubernetes anywhere. 
\ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/_index.md b/content/rancher/v2.x/en/admin-settings/_index.md deleted file mode 100644 index 30edb92b9..000000000 --- a/content/rancher/v2.x/en/admin-settings/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Authentication, Permissions and Global Configuration -weight: 6 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/ - - /rancher/v2.x/en/tasks/global-configuration/ - - /rancher/v2.x/en/concepts/global-configuration/server-url/ - - /rancher/v2.x/en/tasks/global-configuration/server-url/ - - /rancher/v2.x/en/admin-settings/log-in/ ---- - -After installation, the [system administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. - -## First Log In - -After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**.You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. - ->**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. - -## Authentication - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. - -For more information how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/). - -## Authorization - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. - -For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -## Pod Security Policies - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. - -For more information how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). - -## Provisioning Drivers - -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. 
- -For more information, see [Provisioning Drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/). - -## Adding Kubernetes Versions into Rancher - -_Available as of v2.3.0_ - -With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. - -The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/) - -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/). - -## Enabling Experimental Features - -_Available as of v2.3.0_ - -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/_index.md deleted file mode 100644 index c142d1789..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/_index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Authentication -weight: 1115 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/authentication/ - - /rancher/v2.x/en/tasks/global-configuration/authentication/ ---- - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. - -This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. - -## External vs. Local Authentication - -The Rancher authentication proxy integrates with the following external authentication services. The following table lists the first version of Rancher each service debuted. 
- -| Auth Service | Available as of | -| ------------------------------------------------------------------------------------------------ | ---------------- | -| [Microsoft Active Directory]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/) | v2.0.0 | -| [GitHub]({{}}/rancher/v2.x/en/admin-settings/authentication/github/) | v2.0.0 | -| [Microsoft Azure AD]({{}}/rancher/v2.x/en/admin-settings/authentication/azure-ad/) | v2.0.3 | -| [FreeIPA]({{}}/rancher/v2.x/en/admin-settings/authentication/freeipa/) | v2.0.5 | -| [OpenLDAP]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap/) | v2.0.5 | -| [Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | -| [PingIdentity]({{}}/rancher/v2.x/en/admin-settings/authentication/ping-federate/) | v2.0.7 | -| [Keycloak]({{}}/rancher/v2.x/en/admin-settings/authentication/keycloak/) | v2.1.0 | -| [Okta]({{}}/rancher/v2.x/en/admin-settings/authentication/okta/) | v2.2.0 | -| [Google OAuth]({{}}/rancher/v2.x/en/admin-settings/authentication/google/) | v2.3.0 | -| [Shibboleth]({{}}/rancher/v2.x/en/admin-settings/authentication/shibboleth) | v2.4.0 | - -
-However, Rancher also provides [local authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/local/). - -In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. - -## Users and Groups - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -> **Note:** Local authentication does not support creating or managing groups. - -For more information, see [Users and Groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) - -## Scope of Rancher Authorization - -After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: - -| Access Level | Description | -|----------------------------------------------|-------------| -| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | -| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | -| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | - -To set the Rancher access level for users in the authorization service, follow these steps: - -1. From the **Global** view, click **Security > Authentication.** - -1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. - -1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. - -1. Click **Save.** - -**Result:** The Rancher access configuration settings are applied. - -{{< saml_caveats >}} - -## External Authentication Configuration and Principal Users - -Configuration of external authentication requires: - -- A local user assigned the administrator role, called hereafter the _local principal_. -- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. - -Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. - -1. 
Sign into Rancher as the local principal and complete configuration of external authentication. - - ![Sign In]({{}}/img/rancher/sign-in.png) - -2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. - - ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) - -3. After you complete configuration, Rancher automatically signs out the local principal. - - ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) - -4. Then, Rancher automatically signs you back in as the external principal. - - ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) - -5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - - ![Users Page]({{}}/img/rancher/users-page.png) - -6. The external principal and the local principal share the same access rights. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md deleted file mode 100644 index 2dc538db1..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Configuring Active Directory (AD) -weight: 1112 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/active-directory/ ---- - -If your organization uses Microsoft Active Directory as a central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. - -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap) integration. - -> **Note:** -> -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -## Prerequisites - -You'll need to create, or obtain from your AD administrator, a new AD user to use as a service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. - -Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such a user has read-only privileges for most objects in the domain partition. - -Note, however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such a case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. - -> **Using TLS?** -> -> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure you have the CA certificate (concatenated with any intermediate certificates) at hand in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configuration Steps -### Open Active Directory Configuration - -1.
Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **Active Directory**. The **Configure an AD server** form will be displayed. - -### Configure Active Directory Server Settings - -In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. - -> **Note:** -> -> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). - -**Table 1: AD Server parameters** - -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the AD server | -| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | -| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | -| Service Account Password | The password for the service account. | -| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. | -| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| - ---- - -### Configure User/Group Schema - -In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. - -Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. - -> **Note:** -> -> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. - -#### User Schema - -The table below details the parameters for the user schema section configuration. 
- -**Table 2: User schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | -| User Member Attribute | The attribute containing the groups that a user is a member of. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | -| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | -| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | -| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | - ---- - -#### Group Schema - -The table below details the parameters for the group schema configuration. - -**Table 3: Group schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | -| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). 
Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing the user's memberships. See `User Member Attribute`. | -| Nested Group Membership | This setting defines whether Rancher should resolve nested group memberships. Use this only if your organisation makes use of nested memberships (ie. you have groups that contain other groups as members). We advise avoiding nested groups when possible. | - ---- - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly, with the account you test with mapped as the admin. - -> **Note:** -> -> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. - -1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. -2. Click **Authenticate with Active Directory** to finalise the setup. - -**Result:** - -- Active Directory authentication has been enabled. -- You have been signed into Rancher as administrator using the provided AD credentials. - -> **Note:** -> -> You will still be able to log in using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Identify Search Base and Schema using ldapsearch - -In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. - -The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. - -For the purpose of the example commands provided below, we will assume: - -- The Active Directory server has a hostname of `ad.acme.com` -- The server is listening for unencrypted connections on port `389` -- The Active Directory domain is `acme` -- You have a valid AD account with the username `jdoe` and password `secret` - -### Identify Search Base - -First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" -``` - -This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountName=jdoe`), returning the attributes for said user: - -{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} - -Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. - -Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, ie. `OU=Groups,DC=acme,DC=com`.
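As an optional sanity check (purely illustrative, reusing the example host, credentials, and the derived base DN from this walkthrough), you can re-run `ldapsearch` scoped to the candidate **User Search Base** and confirm that it still returns the expected account:

```
# Illustrative check: a search scoped to the derived User Search Base
# should still return the test account's entry.
$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
-h ad.acme.com -b "cn=Users,dc=acme,dc=com" \
-s sub "sAMAccountName=jdoe"
```

If the entry is returned, the search base is scoped correctly; an empty result usually means the base DN you chose does not enclose the user object.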
- -### Identify User Schema - -The output of the above `ldapsearch` query also allows us to determine the correct values to use in the user schema configuration: - -- `Object Class`: **person** [1] -- `Username Attribute`: **name** [2] -- `Login Attribute`: **sAMAccountName** [3] -- `User Member Attribute`: **memberOf** [4] - -> **Note:** -> -> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. - -We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI by entering either their username or full name. - -### Identify Group Schema - -Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ --s sub "CN=examplegroup" -``` - -This command will inform us about the attributes used for group objects: - -{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} - -Again, this allows us to determine the correct values to enter in the group schema configuration: - -- `Object Class`: **group** [1] -- `Name Attribute`: **name** [2] -- `Group Member Mapping Attribute`: **member** [3] -- `Search Attribute`: **sAMAccountName** [4] - -Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly, we will have to set the value of the `Group Member User Attribute` parameter to this attribute. - -In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md deleted file mode 100644 index 1400dfb6c..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: Configuring Azure AD -weight: 1115 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/azure-ad/ ---- - -_Available as of v2.0.3_ - -If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. - ->**Note:** Azure AD integration only supports Service Provider initiated logins. - ->**Prerequisite:** Have an instance of Azure AD configured. - ->**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/).
- -## Azure Active Directory Configuration Outline - -Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. - - - ->**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. - - - -- [1. Register Rancher with Azure](#1-register-rancher-with-azure) -- [2. Create a new client secret](#2-create-a-new-client-secret) -- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) -- [4. Add a Reply URL](#4-add-a-reply-url) -- [5. Copy Azure Application Data](#5-copy-azure-application-data) -- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) - - - -### 1. Register Rancher with Azure - -Before enabling Azure AD within Rancher, you must register Rancher with Azure. - -1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. - -1. Use search to open the **App registrations** service. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - -1. Click **New registration** and complete the **Create** form. - - ![New App Registration]({{}}/img/rancher/new-app-registration.png) - - 1. Enter a **Name** (something like `Rancher`). - - 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)." This corresponds to the legacy app registration options. - - 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security > Authentication > Azure AD). - - 1. Click **Register**. - ->**Note:** It can take up to five minutes for this change to take effect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 2. Create a new client secret - -From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. - -1. Use search to open the **App registrations** service. Then open the entry for Rancher that you created in the last procedure. - - ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) - -1. From the navigation pane on the left, click **Certificates and Secrets**. - -1. Click **New client secret**. - - ![Create new client secret]({{< baseurl >}}/img/rancher/select-client-secret.png) - - 1. Enter a **Description** (something like `Rancher`). - - 1. Select a duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. - - 1. Click **Add** (you don't need to enter a value; it will automatically populate after you save). - - -1. Copy the key value and save it to an [empty text file](#tip). - - You'll enter this key into the Rancher UI later as your **Application Secret**. - - You won't be able to access the key value again within the Azure UI. - -### 3. Set Required Permissions for Rancher - -Next, set API permissions for Rancher within Azure. - -1. From the navigation pane on the left, select **API permissions**. - - ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) - -1.
Click **Add a permission**. - -1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: - - ![Select API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-2.png) - -
-
- - **Access the directory as the signed-in user** - - **Read directory data** - - **Read all groups** - - **Read all users' full profiles** - - **Read all users' basic profiles** - - **Sign in and read user profile** - -1. Click **Add permissions**. - -1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. - - >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. - - -### 4. Add a Reply URL - -To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed by a verification path. - - -1. From the **Settings** blade, select **Reply URLs**. - - ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) - -1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security > Authentication > Azure AD). - -1. Click **Save**. - -**Result:** Your reply URL is saved. - ->**Note:** It can take up to five minutes for this change to take effect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 5. Copy Azure Application Data - -As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. - -1. Obtain your Rancher **Tenant ID**. - - 1. Use search to open the **Azure Active Directory** service. - - ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) - - 1. From the left navigation pane, open **Overview**. - - 1. Copy the **Directory ID** and paste it into your [text file](#tip). - - You'll paste this value into Rancher as your **Tenant ID**. - -1. Obtain your Rancher **Application ID**. - - 1. Use search to open **App registrations**. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - - 1. Find the entry you created for Rancher. - - 1. Copy the **Application ID** and paste it into your [text file](#tip). - -1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. - - 1. From **App registrations**, click **Endpoints**. - - ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) - - 1. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). - - - **Microsoft Graph API endpoint** (Graph Endpoint) - - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) - - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) - ->**Note:** Copy the v1 version of the endpoints. - -### 6. Configure Azure AD in Rancher - -From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. - -Enter the values that you copied to your [text file](#tip). - -1. Log into Rancher. From the **Global** view, select **Security > Authentication**. - -1. Select **Azure AD**. - -1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). - - >**Important:** When entering your Graph Endpoint, remove the tenant ID from the end of the URL. For example, `https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c` becomes `https://graph.windows.net`. - - The following table maps the values you copied in the Azure portal to the fields in Rancher.
- - | Rancher Field | Azure Value | - | ------------------ | ------------------------------------- | - | Tenant ID | Directory ID | - | Application ID | Application ID | - | Application Secret | Key Value | - | Endpoint | https://login.microsoftonline.com/ | - | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | - | Token Endpoint | OAuth 2.0 Token Endpoint | - | Auth Endpoint | OAuth 2.0 Authorization Endpoint | - -1. Click **Authenticate with Azure**. - -**Result:** Azure Active Directory authentication is configured. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md deleted file mode 100644 index 37d8ba2e2..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/freeipa/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Configuring FreeIPA -weight: 1114 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/freeipa/ ---- - -_Available as of v2.0.5_ - -If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. - ->**Prerequisites:** -> ->- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. ->- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **FreeIPA**. - -4. Complete the **Configure an FreeIPA server** form. - - You may need to log in to your domain controller to find the information requested in the form. - - >**Using TLS?** - >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. -
-
- >**User Search Base vs. Group Search Base** - > - >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. - > - >* If your users and groups are in the same search base, complete only the User Search Base. - >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. - -5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. - - >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. - > - >The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. - > - > * `uid`: User ID - > * `sn`: Last Name - > * `givenName`: First Name - > - > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. - -6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. - -**Result:** - -- FreeIPA authentication is configured. -- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md deleted file mode 100644 index 9e2c4266c..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/github/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Configuring GitHub -weight: 1116 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/github/ ---- - -In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. - ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **GitHub**. - -4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. - - >**What's an Authorization Callback URL?** - > - >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). - - >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. - -5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - - >**Where do I find the Client ID and Client Secret?** - > - >From GitHub, select Settings > Developer Settings > OAuth Apps. 
The Client ID and Client Secret are displayed prominently. - -6. Click **Authenticate with GitHub**. - -7. Use the **Site Access** options to configure the scope of user authorization. - - - **Allow any valid Users** - - _Any_ GitHub user can access Rancher. We generally discourage use of this setting! - - - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** - - Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. - - - **Restrict access to only Authorized Users and Organizations** - - Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. -
-8. Click **Save**. - -**Result:** - -- GitHub authentication is configured. -- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md deleted file mode 100644 index 5266e219d..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Configuring Google OAuth ---- -_Available as of v2.3.0_ - -If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. - -Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. - -Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) can configure authentication. - -# Prerequisites -- You must have a [G Suite admin account](https://admin.google.com) configured. -- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. -- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) - -After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: -![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) - -# Setting up G Suite for OAuth with Rancher -Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: - -1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) -1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) -1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) -1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) - -### 1. Adding Rancher as an Authorized Domain -1. Click [here](https://console.developers.google.com/apis/credentials) to go to the credentials page of your Google domain. -1. Select your project and click **OAuth consent screen.** -![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) -1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. For example, www.foo.co.uk has a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) -1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. - -**Result:** Rancher has been added as an authorized domain for the Admin SDK API. - -### 2. Creating OAuth2 Credentials for the Rancher Server -1.
Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) -![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) -1. On the **Create Credentials** dropdown, select **OAuth client ID.** -1. Click **Web application.** -1. Provide a name. -1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. - - Under **Authorized JavaScript origins,** enter your Rancher server URL. - - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. -1. Click on **Create.** -1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. - -**Result:** Your OAuth credentials have been successfully created. - -### 3. Creating Service Account Credentials -Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. - -Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. - -As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. - -This section describes how to: - -- Create a service account -- Create a key for the service account and download the credentials as JSON - -1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. -1. Click on **Create Service Account.** -1. Enter a name and click **Create.** -![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) -1. Don't provide any roles on the **Service account permissions** page and click **Continue** -![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) -1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. -![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) - -**Result:** Your service account is created. - -### 4. Register the Service Account Key as an OAuth Client - -You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. - -Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: - -1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. 
NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** - - ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) -1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) -1. Add the Unique ID obtained in the previous step in the **Client Name** field. -1. In the **One or More API Scopes** field, add the following scopes: - ``` - openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly - ``` -1. Click **Authorize.** - -**Result:** The service account is registered as an OAuth client in your G Suite account. - -# Configuring Google OAuth in Rancher -1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. -1. From the **Global** view, click **Security > Authentication** from the main menu. -1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. - 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. - 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. - 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. - - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) - - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. - - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. -1. Click **Authenticate with Google**. -1. Click **Save**. - -**Result:** Google authentication is successfully configured. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md deleted file mode 100644 index 042517343..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Configuring Keycloak (SAML) -description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins -weight: 1200 ---- -_Available as of v2.1.0_ - -If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -## Prerequisites - -- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. -- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. 
See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - - Setting | Value - ------------|------------ - `Sign Documents` | `ON` 1 - `Sign Assertions` | `ON` 1 - All other `ON/OFF` Settings | `OFF` - `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entity ID Field` of the Rancher Keycloak configuration 2 - `Client Name` | (e.g. `rancher`) - `Client Protocol` | `SAML` - `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` - - >1: Optionally, you can enable either one or both of these settings. - >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. - - {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} - -- In the new SAML client, create Mappers to expose the user's fields - - Add all "Builtin Protocol Mappers" - {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} - - Create a new "Group list" mapper to map the member attribute to a user's groups - {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} -- Export a `metadata.xml` file from your Keycloak client: - From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. - - >**Note** - > Keycloak versions 6.0.0 and up no longer provide the IDP metadata under the `Installation` tab. - > You can still get the XML from the following URL: - > - > `https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor` - > - > The XML obtained from this URL contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: - > - > * Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. - > * Remove the `<EntitiesDescriptor>` tag from the beginning. - > * Remove the `</EntitiesDescriptor>` tag from the end of the XML. - > - > You are left with something similar to the example below: - > - > ``` - > <EntityDescriptor ...> - > .... - > </EntityDescriptor> - > ``` - -## Configuring Keycloak in Rancher - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Keycloak**. - -1. Complete the **Configure Keycloak Account** form. - - - | Field | Description | - | ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | - | Display Name Field | The attribute that contains the display name of users.<br/><br/>Example: `givenName` | - | User Name Field | The attribute that contains the user name/given name.<br/><br/>Example: `email` | - | UID Field | An attribute that is unique to every user.<br/><br/>Example: `email` | - | Groups Field | Make entries for managing group memberships.<br/><br/>Example: `member` | - | Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.<br/><br/>
Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | A key/certificate pair to create a secure connection between Rancher and your IdP. | - | IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | - - >**Tip:** You can generate a key/certificate pair using an openssl command. For example: - > - > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert - - -1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. - -{{< saml_caveats >}} - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration options of your SAML client. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. - -### You are not redirected to Keycloak - -When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. - - * Verify your Keycloak client configuration. - * Make sure `Force Post Binding` is set to `OFF`. - - -### Forbidden message displayed after IdP login - -You are correctly redirected to your IdP login page and you are able to enter your credentials; however, you get a `Forbidden` message afterwards. - - * Check the Rancher debug log. - * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. - -### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata - -This is usually due to the metadata not being created until a SAML provider is configured. -Try configuring and saving Keycloak as your SAML provider and then accessing the metadata. - -### Keycloak Error: "We're sorry, failed to process response" - - * Check your Keycloak log. - * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. - -### Keycloak Error: "We're sorry, invalid requester" - - * Check your Keycloak log. - * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md deleted file mode 100644 index 3044cc298..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Local Authentication -weight: 1111 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/local-authentication/ --- - -Local authentication is the default until you configure an external authentication provider.
Local authentication is where Rancher itself stores the user information, i.e. the names and passwords of the users who can log in to Rancher. By default, the `admin` user that logs in to Rancher for the first time is a local user. - -## Adding Local Users - -Regardless of whether you use external authentication, you should create a few local authentication users so that you can continue using Rancher if your external authentication service encounters issues. - -1. From the **Global** view, select **Users** from the navigation bar. - -2. Click **Add User**. Then complete the **Add User** form. Click **Create** when you're done. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md deleted file mode 100644 index 29a958353..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Configuring Microsoft Active Directory Federation Service (SAML) -weight: 1205 --- -_Available as of v2.0.7_ - -If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. - -## Prerequisites - -You must have Rancher installed. - -- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `<RANCHER_SERVER>` placeholder. -- You must have a global administrator account on your Rancher installation. - -You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. - -- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `<AD_SERVER>` placeholder. -- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. - -## Setup Outline - -Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. - -- [1. Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2. Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) - -{{< saml_caveats >}} - - -### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md deleted file mode 100644 index e71b19275..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: 1. Configuring Microsoft AD FS for Rancher -weight: 1205 --- - -Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. - -1. Log into your AD server as an administrative user. - -1. Open the **AD FS Management** console.
Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - - {{< img "/img/rancher/adfs/adfs-overview.png" "">}} - -1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - - {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} - -1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - - {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} - -1. Select **AD FS profile** as the configuration profile for your relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} - -1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - - {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} - -1. Select **Enable support for the SAML 2.0 WebSSO protocol** - and enter `https://<RANCHER_SERVER>/v1-saml/adfs/saml/acs` for the service URL. - - {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} - -1. Add `https://<RANCHER_SERVER>/v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} - -1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - - {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} - -1. From **Choose Issuance Authorization Rules**, you may select either of the available options according to your use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} - -1. After reviewing your settings, select **Next** to add the relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} - - -1. Select **Open the Edit Claim Rules...** and click **Close**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} - -1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - - {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} - -1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - - {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} - -1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: - - | LDAP Attribute | Outgoing Claim Type | - | -------------------------------------------- | ------------------- | - | Given-Name | Given Name | - | User-Principal-Name | UPN | - | Token-Groups - Qualified by Long Domain Name | Group | - | SAM-Account-Name | Name | -
- {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} - -1. Download the `federationmetadata.xml` from your AD server at: -``` -https:///federationmetadata/2007-06/federationmetadata.xml -``` - -**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. - -### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md deleted file mode 100644 index 4a9d9e6b0..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: 2. Configuring Rancher for Microsoft AD FS -weight: 1205 ---- -_Available as of v2.0.7_ - -After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. - ->**Important Notes For Configuring Your AD FS Server:** -> ->- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` ->- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` ->- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Microsoft Active Directory Federation Services**. - -1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. - - - - - - - - -1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. - - Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. - - >**Note:** You may have to disable your popup blocker to see the AD FS login page. - -**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. - -# Configuration - -| Field | Description | -|---------------------------|-----------------| -| Display Name Field | The AD attribute that contains the display name of users.
<br/><br/>Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` | -| User Name Field | The AD attribute that contains the user name/given name.<br/><br/>Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` | -| UID Field | An AD attribute that is unique to every user.<br/><br/>Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` | -| Groups Field | Make entries for managing group memberships.<br/><br/>Example: `http://schemas.xmlsoap.org/claims/Group` | -| Rancher API Host | The URL for your Rancher Server. | -| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL.<br/><br/>[Certificate creation command](#cert-command) | -| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.<br/><br/>
You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | - - - - -**Tip:** You can generate a certificate using an openssl command. For example: - -``` -openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" -``` diff --git a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md deleted file mode 100644 index 7bd3c928b..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Configuring Okta (SAML) -weight: 1210 ---- - -_Available as of v2.2.0_ - -If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - ->**Note:** Okta integration only supports Service Provider initiated logins. - -## Prerequisites - -In Okta, create a SAML Application with the settings below. See the [Okta documentation](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta) for help. - -Setting | Value -------------|------------ -`Single Sign on URL` | `https://yourRancherHostURL/v1-saml/okta/saml/acs` -`Audience URI (SP Entity ID)` | `https://yourRancherHostURL/v1-saml/okta/saml/metadata` - -## Configuring Okta in Rancher - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Okta**. - -1. Complete the **Configure Okta Account** form. The examples below describe how you can map Okta attributes from attribute statements to fields within Rancher. - - | Field | Description | - | ------------------------- | ----------------------------------------------------------------------------- | - | Display Name Field | The attribute name from an attribute statement that contains the display name of users. | - | User Name Field | The attribute name from an attribute statement that contains the user name/given name. | - | UID Field | The attribute name from an attribute statement that is unique to every user. | - | Groups Field | The attribute name in a group attribute statement that exposes your groups. | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | A key/certificate pair used for Assertion Encryption. | - | Metadata XML | The `Identity Provider metadata` file that you find in the application `Sign On` section. | - - >**Tip:** You can generate a key/certificate pair using an openssl command. For example: - > - > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.crt - - - -1. After you complete the **Configure Okta Account** form, click **Authenticate with Okta**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Okta IdP to validate your Rancher Okta configuration. - - >**Note:** If nothing seems to happen, it's likely because your browser blocked the pop-up. Make sure you disable the pop-up blocker for your rancher domain and whitelist it in any other extensions you might utilize. - -**Result:** Rancher is configured to work with Okta. Your users can now sign into Rancher using their Okta logins. 
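As a quick sanity check after saving the configuration, you can request Rancher's Okta service provider metadata endpoint listed in the prerequisites table above. This is a minimal sketch: `yourRancherHostURL` is the same placeholder used above, the endpoint is only expected to return metadata once the configuration has been saved, and `-k` is only needed if Rancher uses a self-signed certificate.

```
# Placeholder hostname from the table above; expect SAML SP metadata (XML) back
# once the Okta configuration has been saved in Rancher.
curl -k https://yourRancherHostURL/v1-saml/okta/saml/metadata
```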
- -{{< saml_caveats >}} diff --git a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md deleted file mode 100644 index 4ddb01fb8..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Configuring OpenLDAP -weight: 1113 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/authentication/openldap/ ---- - -_Available as of v2.0.5_ - -If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. - -## Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](./openldap-config) - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. - -> **Note:** -> -> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. - -1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. -2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. - -**Result:** - -- OpenLDAP authentication is configured. -- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. 
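Before testing authentication in the UI, it can help to confirm the service account credentials and search base directly against the OpenLDAP server. The sketch below uses `ldapsearch`; the hostname, bind DN, and search base are placeholders for your own values.

```
# Placeholder host, bind DN, and search base; -W prompts for the service account password.
ldapsearch -x -H ldap://openldap.example.com:389 \
  -D "cn=rancher-bind,ou=services,dc=example,dc=com" -W \
  -b "ou=people,dc=example,dc=com" \
  "(objectClass=inetOrgPerson)" uid cn
```

If this search returns the expected user entries, the same values should work in the Rancher form.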
- -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/_index.md deleted file mode 100644 index addd6773a..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: OpenLDAP Configuration Reference -weight: 2 ---- - -This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. - -For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) - -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) -- [OpenLDAP server configuration](#openldap-server-configuration) -- [User/group schema configuration](#user-group-schema-configuration) - - [User schema configuration](#user-schema-configuration) - - [Group schema configuration](#group-schema-configuration) - -## Background: OpenLDAP Authentication Flow - -1. When a user attempts to login with his LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. -2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. -3. Once the user has been found, he is authenticated with another LDAP bind request using the user's DN and provided password. -4. Once authentication succeeded, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. - -# OpenLDAP Server Configuration - -You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -
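If you are connecting over TLS, the sketch below shows one way to inspect the certificate chain the server presents and to assemble the PEM bundle that the configuration asks for; the hostname and file names are placeholders.

```
# Placeholder hostname; prints the certificate chain presented on the LDAPS port.
openssl s_client -connect openldap.example.com:636 -showcerts </dev/null

# Placeholder file names; concatenate the CA and any intermediate certificates
# into one PEM file and paste its contents into the certificate field.
cat intermediate-ca.pem root-ca.pem > ldap-ca-chain.pem
```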
-**OpenLDAP Server Parameters**
- -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the OpenLDAP server | -| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | -| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. (see [Prerequisites](#prerequisites)). | -| Service Account Password | The password for the service account. | -| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| - -# User/Group Schema Configuration - -If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. - -Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. - -If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -### User Schema Configuration - -The table below details the parameters for the user schema configuration. - -
-**User Schema Configuration Parameters**
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | -| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | -| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | -| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | - -### Group Schema Configuration - -The table below details the parameters for the group schema configuration. - -
-**Group Schema Configuration Parameters**
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | -| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md deleted file mode 100644 index e9a43d062..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Configuring PingIdentity (SAML) -weight: 1200 ---- -_Available as of v2.0.7_ - -If your organization uses Ping Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - ->**Prerequisites:** -> ->- You must have a [Ping IdP Server](https://www.pingidentity.com/) configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/ping/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/ping/saml/acs` -Note that these URLs will not return valid data until the authentication configuration is saved in Rancher. ->- Export a `metadata.xml` file from your IdP Server. For more information, see the [PingIdentity documentation](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **PingIdentity**. - -1. Complete the **Configure Ping Account** form. Ping IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). - - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. 
**Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. **IDP-metadata**: The `metadata.xml` file that you [exported from your IdP server](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). - - -1. After you complete the **Configure Ping Account** form, click **Authenticate with Ping**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Ping IdP to validate your Rancher PingIdentity configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with PingIdentity. Your users can now sign into Rancher using their PingIdentity logins. - -{{< saml_caveats >}} diff --git a/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/_index.md deleted file mode 100644 index 4e2c2001d..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Configuring Shibboleth (SAML) -weight: 1210 ---- - -_Available as of v2.4.0_ - -If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. - -In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. - -> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](./about) - -This section covers the following topics: - -- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) - - [Shibboleth Prerequisites](#shibboleth-prerequisites) - - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) - - [SAML Provider Caveats](#saml-provider-caveats) -- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) - - [OpenLDAP Prerequisites](#openldap-prerequisites) - - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) - - [Troubleshooting](#troubleshooting) - -# Setting up Shibboleth in Rancher - -### Shibboleth Prerequisites -> ->- You must have a Shibboleth IdP Server configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` ->- Export a `metadata.xml` file from your IdP Server. For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) - -### Configure Shibboleth in Rancher -If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. 
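If you still need the `metadata.xml` mentioned in the prerequisites, a stock Shibboleth IdP typically serves its metadata at `/idp/shibboleth`. This is an assumption about a default installation, so adjust the URL for your deployment.

```
# Placeholder IdP hostname; /idp/shibboleth is the default metadata location on a
# stock Shibboleth IdP and may differ in your environment.
curl -o metadata.xml https://shibboleth-idp.example.com/idp/shibboleth
```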
- -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Shibboleth**. - -1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). - - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. - - -1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. - -### SAML Provider Caveats - -If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. - -- There is no validation on users or groups when assigning permissions to them in Rancher. -- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. -- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. -- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. - -To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. - -# Setting up OpenLDAP in Rancher - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. - -### OpenLDAP Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. 
It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -### Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config) Note that nested group membership is not available for Shibboleth. - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -# Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/about/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/about/_index.md deleted file mode 100644 index 6a057b210..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/shibboleth/about/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Group Permissions with Shibboleth and OpenLDAP -weight: 1 ---- - -_Available as of Rancher v2.4_ - -This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. - -Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. - -One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. - -### Terminology - -- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships. -- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. 
-- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. -- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. - -### Adding OpenLDAP Group Permissions to Rancher Resources - -The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. - -For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions view most cluster level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. - -In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning persmissions. Without OpenLDAP, the functionality to search for groups would not be supported. - -When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. - -Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. - -![Adding OpenLDAP Group Permissions to Rancher Resources]({{}}/img/rancher/shibboleth-with-openldap-groups.svg) - \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md deleted file mode 100644 index d88eb423f..000000000 --- a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Users and Groups -weight: 1 ---- - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. - -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). - -## Managing Members - -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. 
For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.x/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. - -All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. - -{{< saml_caveats >}} - -## User Information - -Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. - -Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. - -### Automatically Refreshing User Information - -_Available as of v2.2.0_ - -Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: - -- **`auth-user-info-max-age-seconds`** - - This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. - -- **`auth-user-info-resync-cron`** - - This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. - - -> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. - -### Manually Refreshing User Information - -If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. - -1. From the **Global** view, click on **Users** in the navigation bar. - -1. Click on **Refresh Group Memberships**. - -**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. - ->**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. - - -## Session Length - -_Available as of v2.3.0_ - -The default length (TTL) of each user session is adjustable. The default session length is 16 hours. - -1. From the **Global** view, click on **Settings**. -1. In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** -1. 
Enter the amount of time in minutes a session length should last and click **Save.** - -**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md b/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md deleted file mode 100644 index 504a8f7f9..000000000 --- a/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Configuring a Global Default Private Registry -weight: 400 -aliases: ---- - -You might want to use a private Docker registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the Docker images that are used in your clusters. - -There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. - -For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability) instructions. - -If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. - -# Setting a Private Registry with No Credentials as the Default Registry - -1. Log into Rancher and configure the default administrator password. - -1. Go into the **Settings** view. - - {{< img "/img/rancher/airgap/settings.png" "Settings" >}} - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - - {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} - -**Result:** Rancher will use your private registry to pull system images. - -# Setting a Private Registry with Credentials when Deploying a Cluster - -You can follow these steps to configure a private registry when you provision a cluster with Rancher: - -1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** -1. In the Enable Private Registries section, click **Enabled.** -1. Enter the registry URL and credentials. -1. 
Click **Save.** - -**Result:** The new cluster will be able to pull images from the private registry. diff --git a/content/rancher/v2.x/en/admin-settings/drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/_index.md deleted file mode 100644 index 30b8d47ac..000000000 --- a/content/rancher/v2.x/en/admin-settings/drivers/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Provisioning Drivers -weight: 1140 ---- - -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -### Rancher Drivers - -With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. - -There are two types of drivers within Rancher: - -* [Cluster Drivers](#cluster-drivers) -* [Node Drivers](#node-drivers) - -### Cluster Drivers - -_Available as of v2.2.0_ - -Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. - -By default, Rancher has activated several hosted Kubernetes cloud providers including: - -* [Amazon EKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) - -There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: - -* [Alibaba ACK]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) - -### Node Drivers - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. 
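If you prefer to script this rather than use the UI, a rough sketch is shown below. It assumes kubectl access to the Rancher management (local) cluster and that your Rancher version exposes node drivers as the `nodedrivers.management.cattle.io` resource with a `spec.active` field; the driver name is hypothetical, and the UI under **Tools > Drivers** remains the supported path.

```
# Assumes kubectl access to the Rancher management (local) cluster and that this
# Rancher version exposes the NodeDriver custom resource; verify before relying on it.
kubectl get nodedrivers.management.cattle.io

# Hypothetical driver name; setting spec.active to false hides it from node templates.
kubectl patch nodedrivers.management.cattle.io digitalocean \
  --type merge -p '{"spec":{"active":false}}'
```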
- -Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: - -* [Amazon EC2]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md deleted file mode 100644 index ef92a737b..000000000 --- a/content/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Cluster Drivers -weight: 1 ---- - -_Available as of v2.2.0_ - -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. - -If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. - -### Managing Cluster Drivers - ->**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - -## Activating/Deactivating Cluster Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page, select the **Cluster Drivers** tab. - -3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Cluster Drivers - -If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page select the **Cluster Drivers** tab. - -3. Click **Add Cluster Driver**. - -4. Complete the **Add Cluster Driver** form. Then click **Create**. - - -### Developing your own Cluster Driver - -In order to develop cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). 
diff --git a/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md b/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md deleted file mode 100644 index 5f214d8d1..000000000 --- a/content/rancher/v2.x/en/admin-settings/drivers/node-drivers/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Node Drivers -weight: 2 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/node-drivers/ - - /rancher/v2.x/en/tasks/global-configuration/node-drivers/ ---- - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -#### Managing Node Drivers - ->**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - -## Activating/Deactivating Node Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Node Drivers - -If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. Click **Add Node Driver**. - -3. Complete the **Add Node Driver** form. Then click **Create**. - -### Developing your own node driver - -Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). diff --git a/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md deleted file mode 100644 index 104c2189a..000000000 --- a/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Upgrading Kubernetes without Upgrading Rancher -weight: 1120 ---- - -_Available as of v2.3.0_ - -The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. 
This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. - -> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. - -Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. - -This table below describes the CRDs that are affected by the periodic data sync. - -> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. - -| Resource | Description | Rancher API URL | -|----------|-------------|-----------------| -| System Images | List of system images used to deploy Kubernetes through RKE. | `/v3/rkek8ssystemimages` | -| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | -| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | - -Administrators might configure the RKE metadata settings to do the following: - -- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher -- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub -- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher - -### Refresh Kubernetes Metadata - -The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) - -To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. - -You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. - -### Configuring the Metadata Synchronization - -> Only administrators can change these settings. - -The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. 
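To read the current values outside the UI, you can fetch the setting from the API endpoint named above. This is a minimal sketch; the Rancher URL and API token are placeholders.

```
# Placeholder Rancher URL and API token; returns the current rke-metadata-config
# setting, including the refresh interval and metadata URL.
curl -s -H "Authorization: Bearer token-abcde:<secret>" \
  https://rancher.example.com/v3/settings/rke-metadata-config
```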
- -The way that the metadata is configured depends on the Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. - -If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -{{% /tab %}} -{{% tab "Rancher v2.3" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. - - `branch`: This refers to the Git branch name if the URL is a Git URL. - -If you don't have an air gap setup, you don't need to specify the URL or Git branch where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata.git) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. -{{% /tab %}} -{{% /tabs %}} - -### Air Gap Setups - -Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) - -If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. - -To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. 
For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) - -After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. - -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. -1. Download the OS specific image lists for Linux or Windows. -1. Download `rancher-images.txt`. -1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. - -**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. diff --git a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md deleted file mode 100644 index 426ad2f10..000000000 --- a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Pod Security Policies -weight: 1135 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/pod-security-policies/ - - /rancher/v2.x/en/tasks/global-configuration/pod-security-policies/ - - /rancher/v2.x/en/tasks/clusters/adding-a-pod-security-policy/ ---- - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). - -If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. - -- [How PSPs Work](#how-psps-work) -- [Default PSPs](#default-psps) - - [Restricted](#restricted) - - [Unrestricted](#unrestricted) -- [Creating PSPs](#creating-psps) - - [Requirements](#requirements) - - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) -- [Configuration](#configuration) - -# How PSPs Work - -You can assign PSPs at the cluster or project level. - -PSPs work through inheritance: - -- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. -- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. -- You can override the default PSP by assigning a different PSP directly to the project. - -Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. - -Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). 
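A few kubectl checks can make PSP behavior easier to see. This is a sketch with placeholder names, not a required step.

```
# List the policies in the cluster and inspect one (policy names vary by setup).
kubectl get podsecuritypolicies
kubectl describe psp <policy-name>

# For a pod that was admitted, the PSP admission controller records which policy
# validated it in the kubernetes.io/psp annotation.
kubectl -n <namespace> get pod <pod-name> \
  -o jsonpath='{.metadata.annotations.kubernetes\.io/psp}'
```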
- -# Default PSPs - -_Available as of v2.0.7_ - -Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. - -### Restricted - -This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: - -- Prevents pods from running as a privileged user and prevents escalation of privileges. -- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added). - -### Unrestricted - -This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. - -# Creating PSPs - -Using Rancher, you can create a Pod Security Policy using our GUI rather than creating a YAML file. - -### Requirements - -Rancher can only assign PSPs for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) - -It is a best practice to set PSP at the cluster level. - -We recommend adding PSPs during cluster and project creation instead of adding it to an existing one. - -### Creating PSPs in the Rancher UI - -1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. - - **Step Result:** The **Add Policy** form opens. - -2. Name the policy. - -3. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does. - - -# Configuration - -The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) - - - - - -[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems -[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces -[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/content/rancher/v2.x/en/admin-settings/rbac/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/_index.md deleted file mode 100644 index 01b6eaaca..000000000 --- a/content/rancher/v2.x/en/admin-settings/rbac/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Role-Based Access Control (RBAC) -weight: 1120 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/users-permissions-roles/ ---- - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/), users can either be local or external. - -After you configure external authentication, the users that display on the **Users** page changes. - -- If you are logged in as a local user, only local users display. - -- If you are logged in as an external user, both external and local users display. - -## Users and Roles - -Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. 
- -- [Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/): - - Define user authorization outside the scope of any particular cluster. - -- [Cluster and Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/): - - Define user authorization inside the specific cluster or project where they are assigned the role. - -Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md deleted file mode 100644 index 9cc4c3831..000000000 --- a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Cluster and Project Roles -weight: 1127 ---- - -Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. - -### Membership and Role Assignment - -The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. - -When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. - -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. - -### Cluster Roles - -_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. - -- **Cluster Owner:** - - These users have full control over the cluster and all resources in it. - -- **Cluster Member:** - - These users can view most cluster level resources and create new projects. - -#### Custom Cluster Roles - -Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. - -#### Cluster Role Reference - -The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. - -| Built-in Cluster Role | Owner | Member | -| ---------------------------------- | ------------- | --------------------------------- | -| Create Projects | ✓ | ✓ | -| Manage Cluster Backups             | ✓ | | -| Manage Cluster Catalogs | ✓ | | -| Manage Cluster Members | ✓ | | -| Manage Nodes | ✓ | | -| Manage Storage | ✓ | | -| View All Projects | ✓ | | -| View Cluster Catalogs | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | -| View Nodes | ✓ | ✓ | - -For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. 
Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Note:** ->When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Giving a Custom Cluster Role to a Cluster Member - -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/#adding-a-custom-role) cluster owners and admins can then assign those roles to cluster members. - -To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. - -To assign the role to a new cluster member, - -1. Go to the **Cluster** view, then go to the **Members** tab. -1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create.** - -**Result:** The member has the assigned role. - -To assign any custom role to an existing cluster member, - -1. Go to the member you want to give the role to. Click the **⋮ > View in API.** -1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** - -**Result:** The member has the assigned role. - -### Project Roles - -_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. - -- **Project Owner:** - - These users have full control over the project and all resources in it. - -- **Project Member:** - - These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. - -- **Read Only:** - - These users can view everything in the project but cannot create, update, or delete anything. - - >**Caveat:** - > - >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - - -#### Custom Project Roles - -Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. - -#### Project Role Reference - -The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. 
- -| Built-in Project Role | Owner | Member | Read Only | -| ---------------------------------- | ------------- | ----------------------------- | ------------- | -| Manage Project Members | ✓ | | | -| Create Namespaces | ✓ | ✓ | | -| Manage Config Maps | ✓ | ✓ | | -| Manage Ingress | ✓ | ✓ | | -| Manage Project Catalogs | ✓ | | | -| Manage Secrets | ✓ | ✓ | | -| Manage Service Accounts | ✓ | ✓ | | -| Manage Services | ✓ | ✓ | | -| Manage Volumes | ✓ | ✓ | | -| Manage Workloads | ✓ | ✓ | | -| View Secrets | ✓ | ✓ | | -| View Config Maps | ✓ | ✓ | ✓ | -| View Ingress | ✓ | ✓ | ✓ | -| View Project Members | ✓ | ✓ | ✓ | -| View Project Catalogs | ✓ | ✓ | ✓ | -| View Service Accounts | ✓ | ✓ | ✓ | -| View Services | ✓ | ✓ | ✓ | -| View Volumes | ✓ | ✓ | ✓ | -| View Workloads | ✓ | ✓ | ✓ | - -> **Notes:** -> ->- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. ->- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. - -### Defining Custom Roles -As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. - -When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. - -### Default Cluster and Project Roles - -By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. - -There are two methods for changing default cluster/project roles: - -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - -- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. - - For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). 
- ->**Note:** -> ->- Although you can [lock]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. ->- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. - -### Configuring Default Roles for Cluster and Project Creators - -You can change the cluster or project role(s) that are automatically assigned to the creating user. - -1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. - -1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. - -1. Enable the role as default. -{{% accordion id="cluster" label="For Clusters" %}} -1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. -1. Click **Save**. -{{% /accordion %}} -{{% accordion id="project" label="For Projects" %}} -1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. -1. Click **Save**. -{{% /accordion %}} - -1. If you want to remove a default role, edit the permission and select **No** from the default roles option. - -**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. - -### Cluster Membership Revocation Behavior - -When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - -- Access the projects they hold membership in. -- Exercise any [individual project roles](#project-role-reference) they are assigned. - -If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md deleted file mode 100644 index eda68f183..000000000 --- a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Custom Roles -weight: 1128 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/roles/ ---- - -Within Rancher, _roles_ determine what actions a user can make within a cluster or project. - -Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) -- [Creating a custom global role](#creating-a-custom-global-role) -- [Deleting a custom global role](#deleting-a-custom-global-role) -- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) - -## Prerequisites - -To complete the tasks on this page, one of the following permissions are required: - - - [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). 
- - [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - -## Creating A Custom Role for a Cluster or Project - -While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. - -The steps to add custom roles differ depending on the version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.0.7+" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Select a tab to determine the scope of the roles you're adding. The tabs are: - - - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. - - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. - -1. Click **Add Cluster/Project Role.** - -1. **Name** the role. - -1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. - - > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. - -{{% /tab %}} -{{% tab "Rancher before v2.0.7" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Click **Add Role**. - -1. **Name** the role. - -1. Choose whether to set the role to a status of [locked]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). - - > **Note:** Locked roles cannot be assigned to users. - -1. In the **Context** dropdown menu, choose the scope of the role assigned to the user. The contexts are: - - - **All:** The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. - - - **Cluster:** This role is valid for assignment when adding/managing members to _only_ clusters. - - - **Project:** This role is valid for assignment when adding/managing members to _only_ projects. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. 
- - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. - -{{% /tab %}} -{{% /tabs %}} - -## Creating a Custom Global Role - -_Available as of v2.4.0_ - -### Creating a Custom Global Role that Copies Rules from an Existing Role - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. - -The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. - -To create a custom global role based on an existing role, - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. Click **Save.** - -### Creating a Custom Global Role that Does Not Copy Rules from Another Role - -Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, click **Add Global Role.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. 
Click **Save.** - -## Deleting a Custom Global Role - -_Available as of v2.4.0_ - -When deleting a custom global role, all global role bindings with this custom role are deleted. - -If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. - -Custom global roles can be deleted, but built-in roles cannot be deleted. - -To delete a custom global role, - -1. Go to the **Global** view and click **Security > Roles.** -2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** -3. Click **Delete.** - -## Assigning a Custom Global Role to a Group - -_Available as of v2.4.0_ - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. - -When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Custom** section, choose any custom global role that will be assigned to the group. -1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md deleted file mode 100644 index 55cf08a04..000000000 --- a/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Global Permissions -weight: 1126 ---- - -_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. - -Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. - -- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. - -- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. - -- **User-Base:** User-Base users have login-access only. 
- -You cannot update or delete the built-in Global Permissions. - -This section covers the following topics: - -- [Restricted Admin](#restricted-admin) -- [Global permission assignment](#global-permission-assignment) - - [Global permissions for new local users](#global-permissions-for-new-local-users) - - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) -- [Custom global permissions](#custom-global-permissions) - - [Custom global permissions reference](#custom-global-permissions-reference) - - [Configuring default global permissions for new users](#configuring-default-global-permissions) - - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) - - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - - [Refreshing group memberships](#refreshing-group-memberships) - -# Restricted Admin - -_Available as of Rancher v2.5_ - -A new `restricted-admin` role was created in Rancher v2.5 in order to prevent privilege escalation from the local Rancher server Kubernetes cluster. This role has full administrator access to all downstream clusters managed by Rancher, but it does not have permission to alter the local Kubernetes cluster. - -The `restricted-admin` can create other `restricted-admin` users with an equal level of access. - -A new setting was added to Rancher to set the initial bootstrapped administrator to have the `restricted-admin` role. This applies to the first user created when the Rancher server is started for the first time. If the environment variable is set, then no global administrator would be created, and it would be impossible to create the global administrator through Rancher. - -To bootstrap Rancher with the `restricted-admin` as the initial user, the Rancher server should be started with the following environment variable: - -``` -CATTLE_RESTRICTED_DEFAULT_ADMIN=true -``` -### List of `restricted-admin` Permissions - -The `restricted-admin` permissions are as follows: - -- Has full admin access to all downstream clusters managed by Rancher. -- Has very limited access to the local Kubernetes cluster. Can access Rancher custom resource definitions, but has no access to any Kubernetes native types. -- Can add other users and assign them to clusters outside of the local cluster. -- Can create other restricted admins. -- Cannot grant any permissions in the local cluster they don't currently have. (This is how Kubernetes normally operates) - -### Upgrading from Rancher with a Hidden Local Cluster - -Before Rancher v2.5, it was possible to run the Rancher server using this flag to hide the local cluster: - -``` ---add-local=false -``` - -You will need to drop this flag when upgrading to Rancher v2.5. Otherwise, Rancher will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. - -### Changing Global Administrators to Restricted Admins - -If Rancher already has a global administrator, they should change all global administrators over to the new `restricted-admin` role. - -This can be done through **Security > Users** and moving any Administrator role over to Restricted Administrator. - -Signed-in users can change themselves over to the `restricted-admin` if they wish, but they should only do that as the last step, otherwise they won't have the permissions to do so. 
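For a Helm-based installation, one way to pass the `CATTLE_RESTRICTED_DEFAULT_ADMIN` environment variable described above is through the chart's extra environment variables. The snippet below is only a sketch and assumes the Rancher chart exposes an `extraEnv` list; adapt it to however your installation sets environment variables.

```
# Hypothetical Helm values snippet; assumes the chart supports an extraEnv list.
extraEnv:
  - name: CATTLE_RESTRICTED_DEFAULT_ADMIN
    value: "true"
```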
- -# Global Permission Assignment - -Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. - -### Global Permissions for New Local Users - -When you create a new local user, you assign them a global permission as you complete the **Add User** form. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) - -### Global Permissions for Users with External Authentication - -When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) - -Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) - -As of Rancher v2.4.0, you can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. - -# Custom Global Permissions - -Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. - -When a user from an [external authentication source]({{}}/rancher/v2.x/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. - -However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. - -The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. 
- -Administrators can enforce custom global permissions in multiple ways: - -- [Changing the default permissions for new users](#configuring-default-global-permissions) -- [Editing the permissions of an existing user](#configuring-global-permissions-for-individual-users) -- [Assigning a custom global permission to a group](#assigning-a-custom-global-permission-to-a-group) - -### Custom Global Permissions Reference - -The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. - -| Custom Global Permission | Administrator | Standard User | User-Base | -| ---------------------------------- | ------------- | ------------- |-----------| -| Create Clusters | ✓ | ✓ | | -| Create RKE Templates | ✓ | ✓ | | -| Manage Authentication | ✓ | | | -| Manage Catalogs | ✓ | | | -| Manage Cluster Drivers | ✓ | | | -| Manage Node Drivers | ✓ | | | -| Manage PodSecurityPolicy Templates | ✓ | | | -| Manage Roles | ✓ | | | -| Manage Settings | ✓ | | | -| Manage Users | ✓ | | | -| Use Catalog Templates | ✓ | ✓ | | -| User Base\* (Basic log-in access) | ✓ | ✓ | | - -> \*This role has two names: -> -> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. -> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. - -For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Notes:** -> -> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. -> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Configuring Default Global Permissions - -If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. - -> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. - -To change the default global permissions that are assigned to external users upon their first log in, follow these steps: - -1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. - -1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. - -1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. - -1. 
If you want to remove a default permission, edit the permission and select **No** from **New User Default**. - -**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. - -### Configuring Global Permissions for Existing Individual Users - -To configure permission for a user, - -1. Go to the **Users** tab. - -1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** - -1. In the **Global Permissions** section, click **Custom.** - -1. Check the boxes for each subset of permissions you want the user to have access to. - -1. Click **Save.** - -> **Result:** The user's global permissions have been updated. - -### Configuring Global Permissions for Groups - -_Available as of v2.4.0_ - -If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. - -After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. - -For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) - -For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. 
- -### Refreshing Group Memberships - -When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. - -To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. - -An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. - -To refresh group memberships, - -1. From the **Global** view, click **Security > Users.** -1. Click **Refresh Group Memberships.** - -**Result:** Any changes to the group members' permissions will take effect. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md deleted file mode 100644 index 3bbfd52bd..000000000 --- a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Locked Roles -weight: 1129 ---- - -You can set roles to a status of `locked`. Locking roles prevents them from being assigned to users in the future. - -Locked roles: - -- Cannot be assigned to users who don't already have them assigned. - -- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. - -- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. - - **Example:** Let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. - - To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects. Then, you use this new custom role when adding users to a cluster. - -Roles can be locked by the following users: - -- Any user assigned the `Administrator` global permission. - -- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. - - -## Locking/Unlocking Roles - -If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. - -You can lock roles in two contexts: - -- When you're [adding a custom role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - -- When you're editing an existing role (see below). - -1. From the **Global** view, select **Security** > **Roles**. - -2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. - -3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md deleted file mode 100644 index 3c0d8cbcd..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: RKE Templates -weight: 7010 ---- - -_Available as of Rancher v2.3.0_ - -RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. - -RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters.
- -With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. - -RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. - -Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. - -If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. - -The core features of RKE templates allow DevOps and security teams to: - -- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices -- Prevent less technical users from making uninformed choices when provisioning clusters -- Share different templates with different sets of users and groups -- Delegate ownership of templates to users who are trusted to make changes to them -- Control which users can create templates -- Require users to create clusters from a template - -# Configurable Settings - -RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: - -- Cloud provider options -- Pod security options -- Network providers -- Ingress controllers -- Network security configuration -- Network plugins -- Private registry URL and credentials -- Add-ons -- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services - -The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. - -# Scope of RKE Templates - -RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. - -RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
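As a rough sketch of what a YAML-defined template body might contain (illustrative values only; the YAML Customization section below covers the details), the RKE options are nested under the `rancher_kubernetes_engine_config` directive:

```
# Illustrative RKE template snippet; the values are examples, not recommendations.
rancher_kubernetes_engine_config:
  network:
    plugin: canal                  # network plugin selection
  services:
    kube-api:
      pod_security_policy: false
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: example-namespace
```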
- -As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. - - -# Example Scenarios -When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. - -These [example scenarios]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. - -Some of the example scenarios include the following: - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) - -# Template Management - -When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. - -Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. - -RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. 
Then a cluster that was created with the older version of the template can be upgraded to the new template revision. - -In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. - -For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. - -The documents in this section explain the details of RKE template management: - -- [Getting permission to create templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/) -- [Creating and revising templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/) -- [Enforcing template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-a-cluster-template) -- [Overriding template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/overrides/) -- [Sharing templates with cluster creators]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) -- [Sharing ownership of a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -An [example YAML configuration file for a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml) is provided for reference. - -# Applying Templates - -You can [create a cluster from a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-a-cluster-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -# Standardizing Hardware - -RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). - -# YAML Customization - -If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml). 
The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. - -The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. - -For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) - -### Add-ons - -The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). - -The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. - -Some things you could do with add-ons include: - -- Install applications on the Kubernetes cluster after it starts -- Install plugins on nodes that are deployed with a Kubernetes daemonset -- Automatically set up namespaces, service accounts, or role binding - -The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md deleted file mode 100644 index 06a62b8e0..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Applying Templates -weight: 50 ---- - -You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -RKE templates can be applied to new clusters. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -This section covers the following topics: - -- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) -- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) -- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) - -### Creating a Cluster from an RKE Template - -To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: - -1. From the **Global** view, go to the **Clusters** tab. -1. Click **Add Cluster** and choose the infrastructure provider. -1. Provide the cluster name and node template details as usual. -1. 
To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** -1. Choose an existing template and revision from the dropdown menu. -1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. -1. Click **Save** to launch the cluster. - -### Updating a Cluster Created with an RKE Template - -When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. - -- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) -- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. - -If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. - -As of Rancher v2.3.3, an existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. - -> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -### Converting an Existing Cluster to Use an RKE Template - -_Available as of v2.3.3_ - -This section describes how to create an RKE template from an existing cluster. - -RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. 
-- New clusters can be [created from the new template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md deleted file mode 100644 index 10935277f..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Creating and Revising Templates -weight: 32 ---- - -This section describes how to manage RKE templates and revisions. You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** - -Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. - -Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. - -The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a template](#creating-a-template) -- [Updating a template](#updating-a-template) -- [Deleting a template](#deleting-a-template) -- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) -- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) -- [Disabling a template revision](#disabling-a-template-revision) -- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) -- [Setting a template revision as default](#setting-a-template-revision-as-default) -- [Deleting a template revision](#deleting-a-template-revision) -- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) -- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) - -### Prerequisites - -You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions) - -You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -### Creating a Template - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Click **Add Template.** -1. Provide a name for the template. 
An auto-generated name is already provided for the template's first version, which is created along with this template. -1. Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) You can also make the template public to share with everyone in the Rancher setup. -1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. - -**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. - -### Updating a Template - -When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. - -You can't edit individual revisions of a template, so in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) - -When new template revisions are created, clusters using an older revision of the template are unaffected. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to edit and click the **⋮ > Edit.** -1. Edit the required information and click **Save.** -1. Optional: You can change the default revision of this template and also change who it is shared with. - -**Result:** The template is updated. To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) - -### Deleting a Template - -When you no longer use an RKE template for any of your clusters, you can delete it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** -1. Confirm the deletion when prompted. - -**Result:** The template is deleted. - -### Creating a Revision Based on the Default Revision - -You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** -1. Complete the rest of the form to create a new revision. - -**Result:** The RKE template revision is cloned and configured. - -### Creating a Revision Based on a Cloned Revision - -When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** -1. Complete the rest of the form. - -**Result:** The RKE template revision is cloned and configured. 
You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. - -### Disabling a Template Revision - -When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. - -You can disable the revision if it is not being used by any cluster. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to disable. Then select **⋮ > Disable.** - -**Result:** The RKE template revision cannot be used to create a new cluster. - -### Re-enabling a Disabled Template Revision - -If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** - -**Result:** The RKE template revision can be used to create a new cluster. - -### Setting a Template Revision as Default - -When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. - -To set an RKE template revision as default, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** - -**Result:** The RKE template revision will be used as the default option when clusters are created with the template. - -### Deleting a Template Revision - -You can delete all revisions of a template except for the default revision. - -To permanently delete a revision, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** - -**Result:** The RKE template revision is deleted. - -### Upgrading a Cluster to Use a New Template Revision - -> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates) -> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. - -To upgrade a cluster to use a new template revision, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** -1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. -1. Click **Save.** - -**Result:** The cluster is upgraded to use the settings defined in the new template revision. - -### Exporting a Running Cluster to a New RKE Template and Revision - -You can save an existing cluster's settings as an RKE template. - -This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. 
Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. -- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md deleted file mode 100644 index 0773da504..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Template Creator Permissions -weight: 10 ---- - -Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. - -For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). - -# Giving Users Permission to Create Templates - -Templates can only be created by users who have the global permission **Create RKE Templates.** - -Administrators have the global permission to create templates, and only administrators can give that permission to other users. - -For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) - -Administrators can give users permission to create RKE templates in two ways: - -- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) -- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) - -### Allowing a User to Create Templates - -An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** - -**Result:** The user has permission to create RKE templates. - -### Allowing New Users to Create Templates by Default - -Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. - -1. From the **Global** view, click **Security > Roles.** -1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. -1. Select the option **Yes: Default role for new users** and click **Save.** - -**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. - -### Revoking Permission to Create Templates - -Administrators can remove a user's permission to create templates with the following steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. -1. Click **Save.** - -**Result:** The user cannot create RKE templates. 
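If you manage Rancher programmatically, the same permission can also, in principle, be granted through the Rancher API by creating a global role binding. The sketch below is a hedged example, not taken from this page: the server URL, API token, user ID, and the assumption that the built-in global role ID for **Create RKE Templates** is `clustertemplates-create` should all be verified against `/v3/globalroles` and `/v3/users` in your own installation.

```
# Grant the "Create RKE Templates" global role to an existing user via the Rancher API.
# All values below (server URL, API token, user ID, role ID) are placeholders/assumptions.
curl -s -u "token-xxxxx:yyyyy" \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"globalRoleId": "clustertemplates-create", "userId": "u-abcde"}' \
  "https://rancher.example.com/v3/globalrolebindings"
```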
\ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md deleted file mode 100644 index a1fa1e79d..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Template Enforcement -weight: 32 ---- - -This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. - -By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, - -- Only an administrator has the ability to create clusters without a template. -- All standard users must use an RKE template to create a new cluster. -- Standard users cannot create a cluster without using a template. - -Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) - -After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) - -# Requiring New Clusters to Use an RKE Template - -You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. - -To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **True** and click **Save.** - -**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. - -# Disabling RKE Template Enforcement - -To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **False** and click **Save.** - -**Result:** When clusters are provisioned by Rancher, they don't need to use a template. 
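If you prefer to script this change instead of using the UI, the `cluster-template-enforcement` setting can also be updated through the Rancher API. This is a minimal sketch with placeholder values; the server URL and API token are assumptions and should be replaced with your own:

```
# Turn RKE template enforcement on by updating the setting via the /v3/settings endpoint.
# Use "false" as the value to turn enforcement back off.
curl -s -u "token-xxxxx:yyyyy" \
  -X PUT \
  -H 'Content-Type: application/json' \
  -d '{"value": "true"}' \
  "https://rancher.example.com/v3/settings/cluster-template-enforcement"
```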
diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md deleted file mode 100644 index 4e93e102c..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Example Scenarios -weight: 5 ---- - -These example scenarios describe how an organization could use templates to standardize cluster creation. - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) - - -# Enforcing a Template Setting for Everyone - -Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. - -1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. -1. The administrator makes the template public. -1. The administrator turns on template enforcement. - -**Results:** - -- All Rancher users in the organization have access to the template. -- All new clusters created by [standard users]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. -- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. - -In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. - -# Templates for Basic and Advanced Users - -Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. - -1. First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-a-cluster-template) This means that every [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. -1. 
The administrator then creates two templates: - - - One template for basic users, with almost every option specified except for access keys - - One template for advanced users, which has **Allow User Override** turned on for most or all options - -1. The administrator shares the advanced template with only the advanced users. -1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. - -**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. - -# Updating Templates and Clusters Created with Them - -Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. - -In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. - -The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: - -- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. -- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. -- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but upgrades to a newer minor version of Kubernetes will not be allowed. - -# Allowing Other Users to Control and Share a Template - -Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. - -Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. - -To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -The result is that as a template owner, Bob is in charge of version control for that template. 
Bob can now do all of the following: - -- [Revise the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change -- [Disable outdated revisions]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it -- [Delete the whole template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction -- [Set a certain revision as default]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. -- [Share the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md deleted file mode 100644 index 3c85e86d6..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Example YAML -weight: 60 ---- - -Below is an example RKE template configuration file for reference. - -The YAML in the RKE template uses the same customization that is used when you create an RKE cluster. However, since the YAML is within the context of a Rancher provisioned RKE cluster, the customization from the RKE docs needs to be nested under the `rancher_kubernetes_engine_config` directive. - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker - -enable_cluster_alerting: false -# This setting is not enforced. Clusters -# created with this sample template -# would have alerting turned off by default, -# but end users could still turn alerting -# on or off. - -enable_cluster_monitoring: true -# This setting is not enforced. Clusters -# created with this sample template -# would have monitoring turned on -# by default, but end users could still -# turn monitoring on or off. - -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md deleted file mode 100644 index bb5f00d4b..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Overriding Template Settings -weight: 33 ---- - -When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** - -After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. - -When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. 
- -The **Allow User Override** model of the RKE template is useful for situations such as: - -- Administrators know that some settings will need the flexibility to be frequently updated over time -- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md deleted file mode 100644 index 67ca181a9..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: RKE Templates and Infrastructure -weight: 90 ---- - -In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. - -Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. - -If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. - -### Node Templates - -[Node templates]({{}}/rancher/v2.x/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. - -### Terraform - -Terraform is a server provisioning tool. It uses infrastructure-as-code that lets you create almost every aspect of your infrastructure with Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. - -This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. - -Terraform allows you to: - -- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates -- Leverage catalog apps and multi-cluster apps -- Codify infrastructure across many platforms, including Rancher and major cloud providers -- Commit infrastructure-as-code to version control -- Easily repeat configuration and setup of infrastructure -- Incorporate infrastructure changes into standard development practices -- Prevent configuration drift, in which some servers become configured differently than others - -# How Does Terraform Work? - -Terraform is written in files with the extension `.tf`. It is written in HashiCorp Configuration Language, which is a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. 
Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. - -To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. - -Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. - -When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. Then when you run `terraform apply`, the changes are deployed. (A minimal example of this workflow is included at the end of this page.) - -# Tips for Working with Terraform - -- There are examples of how to provision most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) - -- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. - -- You can also modify auth in the Terraform provider. - -- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. - -- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. - -# Tip for Creating CIS Benchmark Compliant Clusters - -This section describes one way that you can make security and compliance-related config files standard in your clusters. - -When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.x/en/security/) you have an encryption config file and an audit log config file. - -Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. - -Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. - -In this way, you can create flags that comply with the CIS benchmark. - -# Resources - -- [Terraform documentation](https://www.terraform.io/docs/) - -- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) - -- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. 
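As a rough illustration of the workflow described above, a minimal Terraform setup for the Rancher 2 provider could look like the following. This is a hedged sketch: the server URL and API token are placeholders, and the provider arguments should be checked against the Rancher 2 provider documentation linked above.

```
# Write a minimal Terraform configuration that points the rancher2 provider at your
# Rancher server, then run the usual init/plan/apply cycle. All values are placeholders.
cat > main.tf <<'EOF'
provider "rancher2" {
  api_url   = "https://rancher.example.com/v3"
  token_key = "token-xxxxx:yyyyy"
}
EOF

terraform init    # download the rancher2 provider plugin
terraform plan    # preview the changes Terraform would make
terraform apply   # create or update the infrastructure through the Rancher API
```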
\ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md deleted file mode 100644 index 863faa1bc..000000000 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Access and Sharing -weight: 31 ---- - -If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. - -Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. - -When you share a template, each user can have one of two access levels: - -- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. -- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. - -If you create a template, you automatically become an owner of that template. - -If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) - -There are several ways to share templates: - -- Add users to a new RKE template during template creation -- Add users to an existing RKE template -- Make the RKE template public, sharing it with all users in the Rancher setup -- Share template ownership with users who are trusted to modify the template - -### Sharing Templates with Specific Users or Groups - -To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. In the **Share Template** section, click on **Add Member**. -1. Search in the **Name** field for the user or group you want to share the template with. -1. Choose the **User** access type. -1. Click **Save.** - -**Result:** The user or group can create clusters using the template. - -### Sharing Templates with All Users - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** - -**Result:** All users in the Rancher setup can create clusters using the template. - -### Sharing Ownership of Templates - -If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. - -In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. - -To give Owner access to a user or group, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. -1. 
In the **Access Type** field, click **Owner.** -1. Click **Save.** - -**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/api/_index.md b/content/rancher/v2.x/en/api/_index.md deleted file mode 100644 index 66d9a267b..000000000 --- a/content/rancher/v2.x/en/api/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: API -weight: 24 ---- - -## How to use the API - -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{}}/rancher/v2.x/en/user-settings/api-keys/). - -## Authentication - -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.x/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.x/en/api/api-tokens). - -## Making requests - -The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). - -- Every type has a Schema which describes: - - The URL to get to the collection of this type of resources - - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. - - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). - - Every field that filtering is allowed on - - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. - - -- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. - -- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. - -- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. 
- -- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. - -- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. - -- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. - -- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). - -## Filtering - -Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. - -## Sorting - -Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. - -## Pagination - -API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. diff --git a/content/rancher/v2.x/en/api/api-tokens/_index.md b/content/rancher/v2.x/en/api/api-tokens/_index.md deleted file mode 100644 index 36f164526..000000000 --- a/content/rancher/v2.x/en/api/api-tokens/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: API Tokens -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/api/api-tokens/ ---- - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password. - -You can deactivate API tokens by deleting them or by deactivating the user account. - -### Deleting tokens -To delete a token, - -1. Go to the list of all tokens in the Rancher API view at `https:///v3/tokens`. - -1. Access the token you want to delete by its ID. For example, `https:///v3/tokens/kubectl-shell-user-vqkqt` - -1. 
Click **Delete.** - -Here is the complete list of tokens that are generated with `ttl=0`: - -| Token | Description | -|-------|-------------| -| `kubeconfig-*` | Kubeconfig token | -| `kubectl-shell-*` | Access to `kubectl` shell in the browser | -| `agent-*` | Token for agent deployment | -| `compose-token-*` | Token for compose | -| `helm-token-*` | Token for Helm chart deployment | -| `*-pipeline*` | Pipeline token for project | -| `telemetry-*` | Telemetry token | -| `drain-node-*` | Token for drain (we use `kubectl` for drain because there is no native Kubernetes API) | - - -### Setting TTL on Kubeconfig Tokens -_**Available as of v2.4.6**_ - -Starting Rancher v2.4.6, admins can set a global TTL on Kubeconfig tokens. Once the token expires the kubectl command will require the user to authenticate to Rancher. - -_**Note:**_: - -Existing kubeconfig tokens won't be updated with the new TTL. Admins can [delete old kubeconfig tokens](#deleting-tokens). - -1. Disable the kubeconfig-generate-token setting in the Rancher API view at `https:// In Rancher v2.5, the `rancher-backup` operator is used to back up and restore Rancher. This section preserves the older documentation for backups and restores in Rancher v2.0.x-v2.4.x. - -This section is devoted to protecting your data in a disaster scenario. - -To protect yourself from a disaster scenario, you should create backups on a regular basis. - -- [Backup](./backup) -- [Restore](./restore) - diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/_index.md deleted file mode 100644 index defb1735c..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Backup -weight: 50 -aliases: - - /rancher/v2.x/en/installation/after-installation/ - - /rancher/v2.x/en/backups/ - - /rancher/v2.x/en/backups/backups - - /rancher/v2.x/en/backups/legacy/backup ---- -This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario. - - - Rancher server backups: - - [Rancher installed on a K3s Kubernetes cluster](./k3s-backups) - - [Rancher installed on an RKE Kubernetes cluster](./rke-backups) - - [Rancher installed with Docker](./docker-backups) - -For information on backing up Rancher launched Kubernetes clusters, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) - -If you are looking to back up your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/_index.md deleted file mode 100644 index 4f176c7d4..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -shortTitle: Docker Installs -weight: 3 -aliases: - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/backups/backups/single-node-backups/ - - /rancher/v2.x/en/backups/legacy/backup/single-node-backups/ ---- - - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. 
Having a recent backup will let you recover quickly from an unexpected disaster. - -### How to Read Placeholders - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run \ - --volumes-from rancher-data- \ - -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -### Obtaining Placeholder Data - -Get the placeholder data by running: - -``` -docker ps -``` - -Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
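If several containers are running on the node, a formatted `docker ps` listing can make the container name and image tag easier to pick out. This is just an optional convenience:

```
# Show only each container's name and image; the tag is the part of the image after the colon.
docker ps --format 'table {{.Names}}\t{{.Image}}'
```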
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -### Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` -1. Use the command below, replacing each [placeholder](#before-you-start), to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each [placeholder](#before-you-start). - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your [Rancher container](#before-you-start). - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/_index.md deleted file mode 100644 index 612ea4922..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Backing up Rancher Installed on a K3s Kubernetes Cluster -shortTitle: K3s Installs -weight: 1 -aliases: - - /rancher/v2.x/en/backups/backups/k3s-backups - - /rancher/v2.x/en/backups/backups/k8s-backups/k3s-backups - - /rancher/v2.x/en/backups/legacy/backup/k8s-backups/k3s-backups/ - - /rancher/v2.x/en/backups/legacy/backups/k3s-backups - - /rancher/v2.x/en/backups/legacy/backup/k3s-backups ---- - -When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data. - -The database administrator will need to back up the external database, or restore it from a snapshot or dump. - -We recommend configuring the database to take recurring snapshots. - -### K3s Kubernetes Cluster Data - -One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### Creating Snapshots and Restoring Databases from Snapshots - -For details on taking database snapshots and restoring your database from them, refer to the official database documentation: - -- [Official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html) -- [Official PostgreSQL documentation](https://www.postgresql.org/docs/8.3/backup-dump.html) -- [Official etcd documentation](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md) \ No newline at end of file diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/_index.md deleted file mode 100644 index fbb21303c..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/_index.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Backing up Rancher Installed on an RKE Kubernetes Cluster -shortTitle: RKE Installs -weight: 2 -aliases: - - /rancher/v2.x/en/installation/after-installation/k8s-install-backup-and-restoration/ - - /rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration/ - - /rancher/v2.x/en/backups/backups/ha-backups - - /rancher/v2.x/en/backups/backups/k8s-backups/ha-backups - - /rancher/v2.x/en/backups/legacy/backup/k8s-backups/ha-backups/ - - /rancher/v2.x/en/backups/legacy/backups/ha-backups - - /rancher/v2.x/en/backups/legacy/backup/ha-backups ---- -This section describes how to create backups of your high-availability Rancher install. - -In an RKE installation, the cluster data is replicated on each of the three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Cluster Data within an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Requirements - -### RKE Version - -The commands for taking `etcd` snapshots are only available in RKE v0.1.7 and later. - -### RKE Config File - -You'll need the RKE config file that you used for Rancher install, `rancher-cluster.yml`. You created this file during your initial install. Place this file in same directory as the RKE binary. - - -# Backup Outline - - -Backing up your high-availability Rancher cluster is process that involves completing multiple tasks. - -1. [Take Snapshots of the `etcd` Database](#1-take-snapshots-of-the-etcd-database) - - Take snapshots of your current `etcd` database using Rancher Kubernetes Engine (RKE). - -1. [Store Snapshot(s) Externally](#2-back-up-local-snapshots-to-a-safe-location) - - After taking your snapshots, export them to a safe location that won't be affected if your cluster encounters issues. - - -# 1. Take Snapshots of the `etcd` Database - -Take snapshots of your `etcd` database. You can use these snapshots later to recover from a disaster scenario. There are two ways to take snapshots: recurringly, or as a one-off. Each option is better suited to a specific use case. Read the short description below each link to know when to use each option. - -- [Option A: Recurring Snapshots](#option-a-recurring-snapshots) - - After you stand up a high-availability Rancher install, we recommend configuring RKE to automatically take recurring snapshots so that you always have a safe restore point available. - -- [Option B: One-Time Snapshots](#option-b-one-time-snapshots) - - We advise taking one-time snapshots before events like upgrades or restore of another snapshot. - -### Option A: Recurring Snapshots - -For all high-availability Rancher installs, we recommend taking recurring snapshots so that you always have a safe restore point available. - -To take recurring snapshots, enable the `etcd-snapshot` service, which is a service that's included with RKE. This service runs in a service container alongside the `etcd` container. You can enable this service by adding some code to `rancher-cluster.yml`. - -**To Enable Recurring Snapshots:** - -The steps to enable recurring snapshots differ based on the version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+" %}} - -1. Open `rancher-cluster.yml` with your favorite text editor. -2. Edit the code for the `etcd` service to enable recurring snapshots. Snapshots can be saved in a S3 compatible backend. - - ``` - services: - etcd: - backup_config: - enabled: true # enables recurring etcd snapshots - interval_hours: 6 # time increment between snapshots - retention: 60 # time in days before snapshot purge - # Optional S3 - s3backupconfig: - access_key: "myaccesskey" - secret_key: "myaccesssecret" - bucket_name: "my-backup-bucket" - folder: "folder-name" # Available as of v2.3.0 - endpoint: "s3.eu-west-1.amazonaws.com" - region: "eu-west-1" - custom_ca: |- - -----BEGIN CERTIFICATE----- - $CERTIFICATE - -----END CERTIFICATE----- - ``` -4. Save and close `rancher-cluster.yml`. -5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. -6. Run the following command: - ``` - rke up --config rancher-cluster.yml - ``` - -**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. 
If configured, the snapshots are also uploaded to your S3 compatible backend. -{{% /tab %}} -{{% tab "RKE v0.1.x" %}} - -1. Open `rancher-cluster.yml` with your favorite text editor. -2. Edit the code for the `etcd` service to enable recurring snapshots. - - ``` - services: - etcd: - snapshot: true # enables recurring etcd snapshots - creation: 6h0s # time increment between snapshots - retention: 24h # time increment before snapshot purge - ``` -4. Save and close `rancher-cluster.yml`. -5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. -6. Run the following command: - ``` - rke up --config rancher-cluster.yml - ``` - -**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. -{{% /tab %}} -{{% /tabs %}} - - -### Option B: One-Time Snapshots - -When you're about to upgrade Rancher or restore it to a previous snapshot, you should snapshot your live image so that you have a backup of `etcd` in its last known state. - -**To Take a One-Time Local Snapshot:** - -1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. - -2. Enter the following command. Replace `<SNAPSHOT.db>` with any name that you want to use for the snapshot (e.g. `upgrade.db`). - - ``` - rke etcd snapshot-save \ - --name <SNAPSHOT.db> \ - --config rancher-cluster.yml - ``` - -**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. - -**To Take a One-Time S3 Snapshot:** - -_Available as of RKE v0.2.0_ - -1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. - -2. Enter the following command. Replace `snapshot-name` with any name that you want to use for the snapshot (e.g. `upgrade.db`). - - ```shell - rke etcd snapshot-save \ - --config rancher-cluster.yml \ - --name snapshot-name \ - --s3 \ - --access-key S3_ACCESS_KEY \ - --secret-key S3_SECRET_KEY \ - --bucket-name s3-bucket-name \ - --s3-endpoint s3.amazonaws.com \ - --folder folder-name # Available as of v2.3.0 - ``` - -**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. It is also uploaded to the S3 compatible backend. - -# 2. Back up Local Snapshots to a Safe Location - -> **Note:** If you are using RKE v0.2.0, you can enable saving the backups to an S3 compatible backend directly and skip this step. - -After taking the `etcd` snapshots, save them to a safe location so that they're unaffected if your cluster experiences a disaster scenario. This location should be persistent. - -In this documentation, as an example, we're using Amazon S3 as our safe location, and [S3cmd](http://s3tools.org/s3cmd) as our tool to create the backups. The backup location and tool that you use are ultimately your decision.
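The example below uses S3cmd; if you already work with the AWS CLI instead, a roughly equivalent sketch (same illustrative bucket name) would be:

```
# Create the bucket once, then copy the local etcd snapshot to it
aws s3 mb s3://rke-etcd-snapshots
aws s3 cp /opt/rke/etcd-snapshots/snapshot.db s3://rke-etcd-snapshots/
```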
- -**Example:** - -``` -root@node:~# s3cmd mb s3://rke-etcd-snapshots -root@node:~# s3cmd put /opt/rke/etcd-snapshots/snapshot.db s3://rke-etcd-snapshots/ -``` diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/_index.md deleted file mode 100644 index 515d0e777..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Restore -weight: 1010 -aliases: - - /rancher/v2.x/en/backups/restorations - - /rancher/v2.x/en/backups/legacy/restore ---- -If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. - -- [Restoring backups for Rancher installed with Docker](./docker-restores) -- [Restoring backups for Rancher installed on an RKE Kubernetes cluster](./rke-restore) -- [Restoring backups for Rancher installed on a K3s Kubernetes cluster](./k3s-restore) - -If you are looking to restore your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer to [this section]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/_index.md deleted file mode 100644 index 0653b10e7..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Restoring Backups—Docker Installs -shortTitle: Docker Installs -weight: 3 -aliases: - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/backups/restorations/single-node-restoration ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angle brackets and all capital letters (`<EXAMPLE>`). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from <RANCHER_CONTAINER_NAME> -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz" -``` - -In this command, `<RANCHER_CONTAINER_NAME>` and `<RANCHER_VERSION>-<DATE>` are placeholders for values from your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `<RANCHER_CONTAINER_TAG>` and `<RANCHER_CONTAINER_NAME>` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `<RANCHER_CONTAINER_TAG>` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | The name of your Rancher container. | -| `<RANCHER_VERSION>` | `v2.0.5` | The version number for your Rancher backup. | -| `<DATE>` | `9-27-18` | The date that the data container or backup was created. | -
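Before starting, it can also be worth confirming that the backup tarball you plan to restore from is present and readable on the node; a quick sanity check might look like this (the filename is illustrative and follows the naming convention from the backup instructions):

```
# List the first few entries of the backup tarball to confirm it is a valid gzip archive
tar tzf rancher-data-backup-v2.0.5-9-27-18.tar.gz | head
```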
- -You can obtain `<RANCHER_CONTAINER_TAG>` and `<RANCHER_CONTAINER_NAME>` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime while restoring backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `<RANCHER_CONTAINER_NAME>` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop <RANCHER_CONTAINER_NAME> - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `ls` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the [placeholders](#before-you-start). Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from <RANCHER_CONTAINER_NAME> -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the [placeholder](#before-you-start). It will restart using your backup data. - - ``` - docker start <RANCHER_CONTAINER_NAME> - ``` - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/k3s-restore/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/k3s-restore/_index.md deleted file mode 100644 index 715836e61..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/k3s-restore/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Restoring Rancher Installed on a K3s Kubernetes Cluster -shortTitle: K3s Installs -weight: 1 -aliases: - - /rancher/v2.x/en/backups/restorations/k3s-restoration - - /rancher/v2.x/en/backups/restorations/k8s-restore/k3s-restore - - /rancher/v2.x/en/backups/legacy/restore/k8s-restore/k3s-restore/ - - /rancher/v2.x/en/backups/legacy/restore/k3s-restore ---- - -When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data. - -The database administrator will need to back up the external database, or restore it from a snapshot or dump. - -We recommend configuring the database to take recurring snapshots.
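As a rough illustration only, if the external datastore happens to be MySQL, a dump-and-restore cycle might look like the sketch below (host, user, database name, and file names are all illustrative; rely on your database's own tooling and the official documentation linked in the next section):

```
# Take a dump of the K3s datastore
mysqldump -h mysql.example.com -u k3suser -p k3s > k3s-datastore-backup.sql

# Restore the datastore from a previously taken dump
mysql -h mysql.example.com -u k3suser -p k3s < k3s-datastore-backup.sql
```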
- -### Creating Snapshots and Restoring Databases from Snapshots - -For details on taking database snapshots and restoring your database from them, refer to the official database documentation: - -- [Official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html) -- [Official PostgreSQL documentation](https://www.postgresql.org/docs/8.3/backup-dump.html) -- [Official etcd documentation](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md) \ No newline at end of file diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/_index.md deleted file mode 100644 index 65d9d9e07..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/_index.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Restoring Backups—Kubernetes installs -shortTitle: RKE Installs -weight: 2 -aliases: - - /rancher/v2.x/en/installation/after-installation/ha-backup-and-restoration/ - - /rancher/v2.x/en/backups/restorations/ha-restoration - - /rancher/v2.x/en/backups/restorations/k8s-restore/rke-restore - - /rancher/v2.x/en/backups/legacy/restore/k8s-restore/rke-restore/ - - /rancher/v2.x/en/backups/legacy/restore/rke-restore ---- - -This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster. -This will restore the Kubernetes configuration and the Rancher database and state. - -> **Note:** This document covers clusters set up with RKE >= v0.2.x, for older RKE versions refer to the [RKE Documentation]({{}}/rke/latest/en/etcd-snapshots/restoring-from-backup). - -## Restore Outline - - - -- [1. Preparation](#1-preparation) -- [2. Place Snapshot](#2-place-snapshot) -- [3. Configure RKE](#3-configure-rke) -- [4. Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster) - - - -### 1. Preparation - -It is advised that you run the restore from your local host or a jump box/bastion where your cluster yaml, rke statefile, and kubeconfig are stored. You will need [RKE]({{}}/rke/latest/en/installation/) and [kubectl]({{}}/rancher/v2.x/en/faq/kubectl/) CLI utilities installed locally. - -Prepare by creating 3 new nodes to be the target for the restored Rancher instance. We recommend that you start with fresh nodes and a clean state. For clarification on the requirements, review the [Installation Requirements](https://rancher.com/docs/rancher/v2.x/en/installation/requirements/). - -Alternatively you can re-use the existing nodes after clearing Kubernetes and Rancher configurations. This will destroy the data on these nodes. See [Node Cleanup]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) for the procedure. - -You must restore each of your etcd nodes to the same snapshot. Copy the snapshot you're using from one of your nodes to the others before running the `etcd snapshot-restore` command. - -> **IMPORTANT:** Before starting the restore make sure all the Kubernetes services on the old cluster nodes are stopped. We recommend powering off the nodes to be sure. - -### 2. Place Snapshot - -As of RKE v0.2.0, snapshots could be saved in an S3 compatible backend. To restore your cluster from the snapshot stored in S3 compatible backend, you can skip this step and retrieve the snapshot in [4. Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster). Otherwise, you will need to place the snapshot directly on one of the etcd nodes. 
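Getting the snapshot onto the node is an ordinary file transfer; for example, a sketch using `scp` (user, node address, and snapshot name are illustrative):

```
# Copy the snapshot retrieved from your backup location onto one of the future etcd nodes
scp ./snapshot.db user@etcd-node-1:/opt/rke/etcd-snapshots/
```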
- -Pick one of the clean nodes that will have the etcd role assigned and place the zip-compressed snapshot file in `/opt/rke/etcd-snapshots` on that node. - -> **Note:** Because of a current limitation in RKE, the restore process does not work correctly if `/opt/rke/etcd-snapshots` is a NFS share that is mounted on all nodes with the etcd role. The easiest options are to either keep `/opt/rke/etcd-snapshots` as a local folder during the restore process and only mount the NFS share there after it has been completed, or to only mount the NFS share to one node with an etcd role in the beginning. - -### 3. Configure RKE - -Use your original `rancher-cluster.yml` and `rancher-cluster.rkestate` files. If they are not stored in a version control system, it is a good idea to back them up before making any changes. - -``` -cp rancher-cluster.yml rancher-cluster.yml.bak -cp rancher-cluster.rkestate rancher-cluster.rkestate.bak -``` - -If the replaced or cleaned nodes have been configured with new IP addresses, modify the `rancher-cluster.yml` file to ensure the address and optional internal_address fields reflect the new addresses. - -> **IMPORTANT:** You should not rename the `rancher-cluster.yml` or `rancher-cluster.rkestate` files. It is important that the filenames match each other. - -### 4. Restore the Database and bring up the Cluster - -You will now use the RKE command-line tool with the `rancher-cluster.yml` and the `rancher-cluster.rkestate` configuration files to restore the etcd database and bring up the cluster on the new nodes. - -> **Note:** Ensure your `rancher-cluster.rkestate` is present in the same directory as the `rancher-cluster.yml` file before starting the restore, as this file contains the certificate data for the cluster. - -#### Restoring from a Local Snapshot - -When restoring etcd from a local snapshot, the snapshot is assumed to be located on the target node in the directory `/opt/rke/etcd-snapshots`. - -``` -rke etcd snapshot-restore --name snapshot-name --config ./rancher-cluster.yml -``` - -> **Note:** The --name parameter expects the filename of the snapshot without the extension. - -#### Restoring from a Snapshot in S3 - -_Available as of RKE v0.2.0_ - -When restoring etcd from a snapshot located in an S3 compatible backend, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. - -``` -$ rke etcd snapshot-restore --config ./rancher-cluster.yml --name snapshot-name \ ---s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com \ ---folder folder-name # Available as of v2.3.0 -``` - -#### Options for `rke etcd snapshot-restore` - -S3 specific options are only available for RKE v0.2.0+. 
- -| Option | Description | S3 Specific | -| --- | --- | ---| -| `--name` value | Specify snapshot name | | -| `--config` value | Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] | | -| `--s3` | Enabled backup to s3 |* | -| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | -| `--access-key` value | Specify s3 accessKey | *| -| `--secret-key` value | Specify s3 secretKey | *| -| `--bucket-name` value | Specify s3 bucket name | *| -| `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| -| `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | - -#### Testing the Cluster - -Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl]({{}}/rancher/v2.x/en/faq/kubectl/#configuration) for details. - -#### Check Kubernetes Pods - -Wait for the pods running in `kube-system`, `ingress-nginx` and the `rancher` pod in `cattle-system` to return to the `Running` state. - -> **Note:** `cattle-cluster-agent` and `cattle-node-agent` pods will be in an `Error` or `CrashLoopBackOff` state until Rancher server is up and the DNS/Load Balancer have been pointed at the new cluster. - -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -cattle-system cattle-cluster-agent-766585f6b-kj88m 0/1 Error 6 4m -cattle-system cattle-node-agent-wvhqm 0/1 Error 8 8m -cattle-system rancher-78947c8548-jzlsr 0/1 Running 1 4m -ingress-nginx default-http-backend-797c5bc547-f5ztd 1/1 Running 1 4m -ingress-nginx nginx-ingress-controller-ljvkf 1/1 Running 1 8m -kube-system canal-4pf9v 3/3 Running 3 8m -kube-system cert-manager-6b47fc5fc-jnrl5 1/1 Running 1 4m -kube-system kube-dns-7588d5b5f5-kgskt 3/3 Running 3 4m -kube-system kube-dns-autoscaler-5db9bbb766-s698d 1/1 Running 1 4m -kube-system metrics-server-97bc649d5-6w7zc 1/1 Running 1 4m -kube-system tiller-deploy-56c4cf647b-j4whh 1/1 Running 1 4m -``` - -#### Finishing Up - -Rancher should now be running and available to manage your Kubernetes clusters. -> **IMPORTANT:** Remember to save your updated RKE config (`rancher-cluster.yml`) state file (`rancher-cluster.rkestate`) and `kubectl` credentials (`kube_config_rancher-cluster.yml`) files in a safe place for future maintenance for example in a version control system. diff --git a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/_index.md b/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/_index.md deleted file mode 100644 index 4838d0942..000000000 --- a/content/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Rolling back to v2.0.0-v2.1.5" -weight: 1 ---- - -> Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved here and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. - -If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. 
- -- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. -- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. - -Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: - -1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. - - **Rancher Installed with Docker** - ``` - docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - - **Rancher Installed on a Kubernetes Cluster** - ``` - kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - -2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** - -3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/). - -4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. - -5. Apply the backed up tokens based on how you installed Rancher. - - **Rancher Installed with Docker** - - Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. - ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - docker exec $1 kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from Unavailable back to Available. - - **Rancher Installed on a Kubernetes Cluster** - - Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. - ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from `Unavailable` back to `Available`. - -6. Continue using Rancher as normal. 
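If you want to double-check that the tokens were applied before continuing, you can read back the same field the script patches; a sketch for the Kubernetes install case (for a Docker install, prefix the command with `docker exec <RANCHER_CONTAINER_NAME>`, as in the earlier steps):

```
# Print each cluster name together with its serviceAccountToken
kubectl get clusters -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.serviceAccountToken}{"\n"}{end}'
```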
diff --git a/content/rancher/v2.x/en/backups/v2.5/_index.md b/content/rancher/v2.x/en/backups/v2.5/_index.md deleted file mode 100644 index 9b57e5bcd..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/_index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Backup and Restore in Rancher v2.5 -shortTitle: Rancher v2.5 -weight: 1 ---- - -In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. - -As of Rancher v2.5, the `rancher-backup` operator is used to backup and restore Rancher. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/main/charts/rancher-backup) - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -The Rancher version must be v2.5.0 and up to use this approach of backing up and restoring Rancher. - -> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) - - [Backup and Restore for Rancher v2.5 installed with Docker](#backup-and-restore-for-rancher-v2-5-installed-with-docker) - - [Backup and Restore for Rancher installed on a Kubernetes Cluster Before v2.5](#backup-and-restore-for-rancher-installed-on-a-kubernetes-cluster-before-v2-5) -- [How Backups and Restores Work](#how-backups-and-restores-work) -- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) - - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) - - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) - - [RBAC](#rbac) -- [Backing up Rancher](#backing-up-rancher) -- [Restoring Rancher](#restoring-rancher) -- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) -- [Default Storage Location Configuration](#default-storage-location-configuration) - - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) - -# Changes in Rancher v2.5 - -The new `rancher-backup` operator allows Rancher to be backed up and restored on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. - -Previously, the way that cluster data was backed up depended on the type of Kubernetes cluster that was used. - -In Rancher v2.4, it was only supported to install Rancher on two types of Kubernetes clusters: an RKE cluster, or a K3s cluster with an external database. If Rancher was installed on an RKE cluster, RKE would be used to take a snapshot of the etcd database and restore the cluster. If Rancher was installed on a K3s cluster with an external database, the database would need to be backed up and restored using the upstream documentation for the database. - -In Rancher v2.5, it is now supported to install Rancher hosted Kubernetes clusters, such as Amazon EKS clusters, which do not expose etcd to a degree that would allow snapshots to be created by an external tool. etcd doesn't need to be exposed for `rancher-backup` to work, because the operator gathers resources by making calls to `kube-apiserver`. 
- -### Backup and Restore for Rancher v2.5 installed with Docker - -For Rancher installed with Docker, refer to the same steps used up till 2.5 for [backups](./docker-installs/docker-backups) and [restores.](./docker-installs/docker-backups) - -### Backup and Restore for Rancher installed on a Kubernetes Cluster Before v2.5 - -For Rancher before v2.5, the way that Rancher is backed up and restored differs based on the way that Rancher was installed. Our legacy backup and restore documentation is here: - -- For Rancher installed on an RKE Kubernetes cluster, refer to the legacy [backup]({{}}/rancher/v2.x/en/backups/legacy/backup/ha-backups) and [restore]({{}}/rancher/v2.x/en/backups/legacy/restore/rke-restore) documentation. -- For Rancher installed on a K3s Kubernetes cluster, refer to the legacy [backup]({{}}/rancher/v2.x/en/backups/legacy/backup/k3s-backups) and [restore]({{}}/rancher/v2.x/en/backups/legacy/restore/k3s-restore) documentation. - -# How Backups and Restores Work - -The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. The following cluster-scoped custom resource definitions are added to the cluster: - -- `backups.resources.cattle.io` -- `resourcesets.resources.cattle.io` -- `restores.resources.cattle.io` - -The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. - -When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. - -The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. - -When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. - -The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. - -# Installing the rancher-backup Operator - -The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin only feature and available only for the **local** cluster. (*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) - -### Installing rancher-backup with the Rancher UI - -1. In the Rancher UI's Cluster Manager, choose the cluster named **local** -1. On the upper-right click on the **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-backup` operator. -1. Optional: Configure the default storage location. For help, refer to the [configuration section.](./configuration/storage-config) - -**Result:** The `rancher-backup` operator is installed. 
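You can also confirm the installation from the command line against the local cluster; a quick sketch (the CRD names are the ones listed above, and `cattle-resources-system` is the namespace the chart installs into):

```
# The operator's custom resource definitions should now exist...
kubectl get crds | grep resources.cattle.io

# ...and its workload should be running in the cattle-resources-system namespace
kubectl get pods -n cattle-resources-system
```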
- -From the **Cluster Explorer,** you can see the `rancher-backup` operator listed under **Deployments.** - -To configure the backup app in Rancher, click **Cluster Explorer** in the upper left corner and click **Rancher Backups.** - -### Installing rancher-backup with the Helm CLI - -Install the backup app as a Helm chart: - -``` -helm repo add rancher-charts https://charts.rancher.io -helm repo update -helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace -helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system -``` - -### RBAC - -Only the rancher admins and the local cluster’s cluster-owner can: - -* Install the Chart -* See the navigation links for Backup and Restore CRDs -* Perform a backup or restore by creating a Backup CR and Restore CR respectively -* List backups/restores performed so far - -# Backing up Rancher - -A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](./back-up-rancher) - -# Restoring Rancher - -A restore is performed by creating a Restore custom resource. For a tutorial, refer to [this page.](./restoring-rancher) - -# Migrating Rancher to a New Cluster - -A migration is performed by following [these steps.](./migrating-rancher) - -# Default Storage Location Configuration - -Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. - -For information on configuring these options, refer to [this page.](./configuration/storage-config) - -### Example values.yaml for the rancher-backup Helm Chart - -The example [values.yaml file](./configuration/storage-config/#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. diff --git a/content/rancher/v2.x/en/backups/v2.5/back-up-rancher/_index.md b/content/rancher/v2.x/en/backups/v2.5/back-up-rancher/_index.md deleted file mode 100644 index 265a7582c..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/back-up-rancher/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Backing up Rancher -weight: 1 -aliases: - - /rancher/v2.x/en/backups/back-up-rancher ---- - -In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer the instructions for [single node backups]({{}}/rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups) - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. - -### Prerequisites - -Rancher version must be v2.5.0 and up - -### 1. Install the `rancher-backup` operator - -The backup storage location is an operator-level setting, so it needs to be configured when `rancher-backup` is installed or upgraded. - -Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. - -1. In the Rancher UI, go to the **Cluster Explorer** view for the local cluster. -1. Click **Apps.** -1. Click **Rancher Backups.** -1. Configure the default storage location. 
For help, refer to the [storage configuration section.](../configuration/storage-config) - -### 2. Perform a Backup - -To perform a backup, a custom resource of type Backup must be created. - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** -1. Click **Backup.** -1. Create the Backup with the form, or with the YAML editor. -1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) -1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3: - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Backup - metadata: - name: s3-recurring-backup - spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 10 - ``` - - > **Note:** When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` - - For help configuring the Backup, refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) - - > **Important:** The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. -1. Click **Create.** - -**Result:** The backup file is created in the storage location configured in the Backup custom resource. The name of this file is used when performing a restore. - diff --git a/content/rancher/v2.x/en/backups/v2.5/configuration/_index.md b/content/rancher/v2.x/en/backups/v2.5/configuration/_index.md deleted file mode 100644 index 89b023384..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/configuration/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Rancher Backup Configuration Reference -shortTitle: Configuration -weight: 4 -aliases: - - /rancher/v2.x/en/backups/configuration ---- - -- [Backup configuration](./backup-config) -- [Restore configuration](./restore-config) -- [Storage location configuration](./storage-config) -- [Example Backup and Restore Custom Resources](../examples) \ No newline at end of file diff --git a/content/rancher/v2.x/en/backups/v2.5/configuration/backup-config/_index.md b/content/rancher/v2.x/en/backups/v2.5/configuration/backup-config/_index.md deleted file mode 100644 index bd4bbf55a..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/configuration/backup-config/_index.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: Backup Configuration -shortTitle: Backup -weight: 1 -aliases: - - /rancher/v2.x/en/backups/configuration/backup-config ---- - -The Backup Create page lets you configure a schedule, enable encryption and specify the storage location for your backups. 
- -{{< img "/img/rancher/backup_restore/backup/backup.png" "">}} - -- [Schedule](#schedule) -- [Encryption](#encryptionconfigname) -- [Storage Location](#storagelocation) - - [S3](#s3) - - [Example S3 Storage Configuration](#example-s3-storage-configuration) - - [Example MinIO Configuration](#example-minio-configuration) - - [Example credentialSecret](#example-credentialsecret) - - [IAM Permissions for EC2 Nodes to Access S3](#iam-permissions-for-ec2-nodes-to-access-s3) -- [RetentionCount](#retentioncount) -- [Examples](#examples) - - -# Schedule - -Select the first option to perform a one-time backup, or select the second option to schedule recurring backups. Selecting **Recurring Backups** lets you configure following two fields: - -- **Schedule**: This field accepts - - Standard [cron expressions](https://en.wikipedia.org/wiki/Cron), such as `"0 * * * *"` - - Descriptors, such as `"@midnight"` or `"@every 1h30m"` -- **Retention Count**: This value specifies how many backup files must be retained. If files exceed the given retentionCount, the oldest files will be deleted. The default value is 10. - -{{< img "/img/rancher/backup_restore/backup/schedule.png" "">}} - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `schedule` | Provide the cron string for scheduling recurring backups. | -| `retentionCount` | Provide the number of backup files to be retained. | - -# Encryption - -The rancher-backup gathers resources by making calls to the kube-apiserver. Objects returned by apiserver are decrypted, so even if [encryption At rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) is enabled, even the encrypted objects gathered by the backup will be in plaintext. - -To avoid storing them in plaintext, you can use the same encryptionConfig file that was used for at-rest encryption, to encrypt certain resources in your backup. - -> **Important:** You must save the encryptionConfig file, because it won’t be saved by the rancher-backup operator. -The same encryptionFile needs to be used when performing a restore. - -The operator consumes this encryptionConfig as a Kubernetes Secret, and the Secret must be in the operator’s namespace. Rancher installs the `rancher-backup` operator in the `cattle-resources-system` namespace, so create this encryptionConfig secret in that namespace. - -For the `EncryptionConfiguration`, you can use the [sample file provided in the Kubernetes documentation.](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) - -To create the Secret, the encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. - -Save the `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: - -``` -kubectl create secret generic encryptionconfig \ - --from-file=./encryption-provider-config.yaml \ - -n cattle-resources-system -``` - -This will ensure that the secret contains a key named `encryption-provider-config.yaml`, and the operator will use this key to get the encryption configuration. - -The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key - -{{< img "/img/rancher/backup_restore/backup/encryption.png" "">}} - -In the example command above, the name `encryptionconfig` can be changed to anything. 
- - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `encryptionConfigSecretName` | Provide the name of the Secret from `cattle-resources-system` namespace, that contains the encryption config file. | - -# Storage Location - -{{< img "/img/rancher/backup_restore/backup/storageLocation.png" "">}} - -If the StorageLocation is specified in the Backup, the operator will retrieve the backup location from that particular S3 bucket. If not specified, the operator will try to find this file in the default operator-level S3 store, and in the operator-level PVC store. The default storage location is configured during the deployment of the `rancher-backup` operator. - -Selecting the first option stores this backup in the storage location configured while installing the rancher-backup chart. The second option lets you configure a different S3 compatible storage provider for storing the backup. - -### S3 - -The S3 storage location contains the following configuration fields: - -1. **Credential Secret** (optional): If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) -1. **Bucket Name**: The name of the S3 bucket where backup files will be stored. -1. **Region** (optional): The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. This field isn't needed for configuring MinIO. -1. **Folder** (optional): The name of the folder in the S3 bucket where backup files will be stored. -1. **Endpoint**: The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket. -1. **Endpoint CA** (optional): This should be the Base64 encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-compatible-storage-configuration) -1. **Skip TLS Verifications** (optional): Set to true if you are not using TLS. - - -| YAML Directive Name | Description | Required | -| ---------------- | ---------------- | ------------ | -| `credentialSecretName` | If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace as long as you provide that namespace in `credentialSecretNamespace`. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | -| `credentialSecretNamespace` | The namespace of the secret containing the credentials to access S3. This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | -| `bucketName` | The name of the S3 bucket where backup files will be stored. | ✓ | -| `folder` | The name of the folder in the S3 bucket where backup files will be stored. 
| | -| `region` | The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | ✓ | -| `endpoint` | The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket. | ✓ | -| `endpointCA` | This should be the Base64 encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-compatible-storage-configuration) | | -| `insecureTLSSkipVerify` | Set to true if you are not using TLS. | | - -### Example S3 Storage Configuration - -```yaml -s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - -### Example MinIO Configuration - -```yaml -s3: - credentialSecretName: minio-creds - bucketName: rancherbackups - endpoint: minio.35.202.130.254.sslip.io - endpointCA: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t -``` -### Example credentialSecret - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: creds -type: Opaque -data: - accessKey: - secretKey: -``` - -Make sure to encode the keys to base64 in YAML file. -Run the following command to encode the keys. -``` -echo -n "your_key" |base64 -``` - -### IAM Permissions for EC2 Nodes to Access S3 - -There are two ways to set up the `rancher-backup` operator to use S3 as the backup storage location. - -One way is to configure the `credentialSecretName` in the Backup custom resource, which refers to AWS credentials that have access to S3. - -If the cluster nodes are in Amazon EC2, the S3 access can also be set up by assigning IAM permissions to the EC2 nodes so that they can access S3. - -To allow a node to access S3, follow the instructions in the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-instance-access-s3-bucket/) to create an IAM role for EC2. 
When you add a custom policy to the role, add the following permissions, and replace the `Resource` with your bucket name: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::rancher-backups" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject", - "s3:PutObjectAcl" - ], - "Resource": [ - "arn:aws:s3:::rancher-backups/*" - ] - } - ] -} -``` - -After the role is created, and you have attached the corresponding instance profile to your EC2 instance(s), the `credentialSecretName` directive can be left empty in the Backup custom resource. - -# Examples - -For example Backup custom resources, refer to [this page.](../../examples/#backup) diff --git a/content/rancher/v2.x/en/backups/v2.5/configuration/restore-config/_index.md b/content/rancher/v2.x/en/backups/v2.5/configuration/restore-config/_index.md deleted file mode 100644 index b53c49526..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/configuration/restore-config/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Restore Configuration -shortTitle: Restore -weight: 2 -aliases: - - /rancher/v2.x/en/backups/configuration/restore-config ---- - -The Restore Create page lets you provide details of the backup to restore from - -{{< img "/img/rancher/backup_restore/restore/restore.png" "">}} - -- [Backup Source](#backup-source) - - [An Existing Backup Config](#an-existing-backup-config) - - [The default storage target](#the-default-storage-target) - - [An S3-compatible object store](#an-s3-compatible-object-store) -- [Encryption](#encryption) -- [Prune during restore](#prune-during-restore) -- [Getting the Backup Filename from S3](#getting-the-backup-filename-from-s3) - -# Backup Source -Provide details of the backup file and its storage location, which the operator will then use to perform the restore. Select from the following options to provide these details - - - - -### An existing backup config - -Selecting this option will populate the **Target Backup** dropdown with the Backups available in this cluster. Select the Backup from the dropdown, and that will fill out the **Backup Filename** field for you, and will also pass the backup source information from the selected Backup to the operator. - -{{< img "/img/rancher/backup_restore/restore/existing.png" "">}} - -If the Backup custom resource does not exist in the cluster, you need to get the exact filename and provide the backup source details with the default storage target or an S3-compatible object store. - - -### The default storage target - -Select this option if you are restoring from a backup file that exists in the default storage location configured at the operator-level. The operator-level configuration is the storage location that was configured when the `rancher-backup` operator was installed or upgraded. Provide the exact filename in the **Backup Filename** field. - -{{< img "/img/rancher/backup_restore/restore/default.png" "">}} - -### An S3-compatible object store - -Select this option if no default storage location is configured at the operator-level, OR if the backup file exists in a different S3 bucket than the one configured as the default storage location. Provide the exact filename in the **Backup Filename** field. Refer to [this section](#getting-the-backup-filename-from-s3) for exact steps on getting the backup filename from s3. Fill in all the details for the S3 compatible object store. 
Its fields are exactly same as ones for the `backup.StorageLocation` configuration in the [Backup custom resource.](../../configuration/backup-config/#storagelocation) - -{{< img "/img/rancher/backup_restore/restore/s3store.png" "">}} - -# Encryption - -If the backup was created with encryption enabled, its file will have `.enc` suffix. Choosing such a Backup, or providing a backup filename with `.enc` suffix will display another dropdown named **Encryption Config Secret**. - -{{< img "/img/rancher/backup_restore/restore/encryption.png" "">}} - -The Secret selected from this dropdown must have the same contents as the one used for the Backup custom resource while performing the backup. If the encryption configuration doesn't match, the restore will fail - -The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `encryptionConfigSecretName` | Provide the name of the Secret from `cattle-resources-system` namespace, that contains the encryption config file. | - -> **Important** -This field should only be set if the backup was created with encryption enabled. Providing the incorrect encryption config will cause the restore to fail. - -# Prune During Restore - -* **Prune**: In order to fully restore Rancher from a backup, and to go back to the exact state it was at when the backup was performed, we need to delete any additional resources that were created by Rancher after the backup was taken. The operator does so if the **Prune** flag is enabled. Prune is enabled by default and it is recommended to keep it enabled. -* **Delete Timeout**: This is the amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `prune` | Delete the resources managed by Rancher that are not present in the backup (Recommended). | -| `deleteTimeoutSeconds` | Amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. | - -# Getting the Backup Filename from S3 - -This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. - -To obtain this file name from S3, go to your S3 bucket (and folder if it was specified while performing backup). - -Copy the filename and store it in your Restore custom resource. So assuming the name of your backup file is `backupfile`, - -- If your bucket name is `s3bucket` and no folder was specified, then the `backupFilename` to use will be `backupfile`. -- If your bucket name is `s3bucket` and the base folder is`s3folder`, the `backupFilename` to use is only `backupfile` . -- If there is a subfolder inside `s3Folder` called `s3sub`, and that has your backup file, then the `backupFilename` to use is `s3sub/backupfile`. - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `backupFilename` | This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. 
| diff --git a/content/rancher/v2.x/en/backups/v2.5/configuration/storage-config/_index.md b/content/rancher/v2.x/en/backups/v2.5/configuration/storage-config/_index.md deleted file mode 100644 index be6622c5b..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/configuration/storage-config/_index.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Backup Storage Location Configuration -shortTitle: Storage -weight: 3 -aliases: - - /rancher/v2.x/en/backups/configuration/storage-config ---- - -Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible object store. - -Only one storage location can be configured at the operator level. - -- [Storage Location Configuration](#storage-location-configuration) - - [No Default Storage Location](#no-default-storage-location) - - [S3-compatible Object Store](#s3-compatible-object-store) - - [Use an existing StorageClass](#existing-storageclass) - - [Use an existing PersistentVolume](#existing-persistent-volume) -- [Encryption](#encryption) -- [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) - -# Storage Location Configuration - -### No Default Storage Location - -You can choose to not have any operator-level storage location configured. If you select this option, you must configure an S3-compatible object store as the storage location for each individual backup. - -### S3-compatible Object Store - -| Parameter | Description | -| -------------- | -------------- | -| Credential Secret | Choose the credentials for S3 from your secrets in Rancher. [Example]({{}}/rancher/v2.x/en/backups/v2.5/examples/#example-credential-secret-for-storing-backups-in-s3). | -| Bucket Name | Enter the name of the [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html) where the backups will be stored. Default: `rancherbackups`. | -| Region | The [AWS region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | -| Folder | The [folder in the S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html) where the backups will be stored. | -| Endpoint | The [S3 endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) For example, `s3.us-west-2.amazonaws.com`. | -| Endpoint CA | The CA cert used to for the S3 endpoint. Default: base64 encoded CA cert | -| insecureTLSSkipVerify | Set to true if you are not using TLS. | - -### Existing StorageClass - -Installing the `rancher-backup` chart by selecting the StorageClass option will create a Persistent Volume Claim (PVC), and Kubernetes will in turn dynamically provision a Persistent Volume (PV) where all the backups will be saved by default. - -For information about creating storage classes refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/#1-add-a-storage-class-and-configure-it-to-use-your-storage-provider) - -> **Important** -It is highly recommended to use a StorageClass with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. -If no such StorageClass is available, after the PV is provisioned, make sure to edit its reclaim policy and set it to "Retain" before storing backups in it. 
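As a minimal sketch of the workaround described in the note above (the PV name `pvc-abc123` is a placeholder for whatever volume was dynamically provisioned for the `rancher-backup` PVC), the reclaim policy of an existing PV can be switched to "Retain" with `kubectl`:

```
# Find the PV that is bound to the PVC created by the rancher-backup chart
kubectl get pvc -n cattle-resources-system

# Patch that PV so it is retained even if the PVC is deleted
kubectl patch pv pvc-abc123 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```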
- -### Existing Persistent Volume - -Select an existing Persistent Volume (PV) that will be used to store your backups. For information about creating PersistentVolumes in Rancher, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) - -> **Important** -It is highly recommended to use a Persistent Volume with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. - - -# Example values.yaml for the rancher-backup Helm Chart - - -This values.yaml file can be used to configure `rancher-backup` operator when the Helm CLI is used to install it. - -For more information about `values.yaml` files and configuring Helm charts during installation, refer to the [Helm documentation.](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing) - -```yaml -image: - repository: rancher/backup-restore-operator - tag: v1.0.3 - -## Default s3 bucket for storing all backup files created by the rancher-backup operator -s3: - enabled: false - ## credentialSecretName if set, should be the name of the Secret containing AWS credentials. - ## To use IAM Role, don't set this field - credentialSecretName: creds - credentialSecretNamespace: "" - region: us-west-2 - bucketName: rancherbackups - folder: base folder - endpoint: s3.us-west-2.amazonaws.com - endpointCA: base64 encoded CA cert - # insecureTLSSkipVerify: optional - -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## If persistence is enabled, operator will create a PVC with mountPath /var/lib/backups -persistence: - enabled: false - - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack). - ## Refer to https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 - ## - storageClass: "-" - - ## If you want to disable dynamic provisioning by setting storageClass to "-" above, - ## and want to target a particular PV, provide name of the target volume - volumeName: "" - - ## Only certain StorageClasses allow resizing PVs; Refer to https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/ - size: 2Gi - - -global: - cattle: - systemDefaultRegistry: "" - -nodeSelector: {} - -tolerations: [] - -affinity: {} -``` diff --git a/content/rancher/v2.x/en/backups/v2.5/docker-installs/_index.md b/content/rancher/v2.x/en/backups/v2.5/docker-installs/_index.md deleted file mode 100644 index 160147e59..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/docker-installs/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Backup and Restore for Rancher Installed with Docker -shortTitle: Docker Installs -weight: 10 -aliases: - - /rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/ ---- - -The steps for backing up and restoring Rancher installed with Docker did not change in Rancher v2.5. 
- -- [Backups](./docker-backups) -- [Restores](./docker-restores) \ No newline at end of file diff --git a/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/_index.md b/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/_index.md deleted file mode 100644 index f428719d4..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -shortTitle: Backups -weight: 3 -aliases: - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/backups/backups/single-node-backups/ - - /rancher/v2.x/en/backups/legacy/backup/single-node-backups/ ---- - - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. - -## Before You Start - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
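Purely as an illustration, and assuming the example values from the table above (container `festive_mestorf`, image tag `v2.0.5`, date `9-27-18`), the backup sequence from the procedure below would look roughly like this:

```
docker stop festive_mestorf
docker create --volumes-from festive_mestorf --name rancher-data-9-27-18 rancher/rancher:v2.0.5
docker run --volumes-from rancher-data-9-27-18 -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz /var/lib/rancher
docker start festive_mestorf
```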
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -## Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` -1. Use the command below, replacing each [placeholder](#before-you-start), to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each [placeholder](#before-you-start). - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your [Rancher container](#before-you-start). - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/_index.md b/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/_index.md deleted file mode 100644 index caab667f8..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Restoring Backups—Docker Installs -shortTitle: Restores -weight: 3 -aliases: - - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/backups/restorations/single-node-restoration ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup--" -``` - -In this command, `` and `-` are environment variables for your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. 
- -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version number for your Rancher backup. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
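Purely as an illustration, and assuming the example values from the table above (container `festive_mestorf`, version `v2.0.5`, date `9-27-18`), the restore sequence from the procedure below would look roughly like this:

```
docker stop festive_mestorf
docker run --volumes-from festive_mestorf -v $PWD:/backup \
busybox sh -c "rm /var/lib/rancher/* -rf && \
tar pzxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"
docker start festive_mestorf
```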
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the [placeholders](#before-you-start). Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup--.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the [placeholder](#before-you-start). It will restart using your backup data. - - ``` - docker start - ``` - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/content/rancher/v2.x/en/backups/v2.5/examples/_index.md b/content/rancher/v2.x/en/backups/v2.5/examples/_index.md deleted file mode 100644 index f0b60ac37..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/examples/_index.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: Examples -weight: 5 -aliases: - - /rancher/v2.x/en/backups/examples ---- - -This section contains examples of Backup and Restore custom resources. - -The default backup storage location is configured when the `rancher-backup` operator is installed or upgraded. - -Encrypted backups can only be restored if the Restore custom resource uses the same encryption configuration secret that was used to create the backup. 
- -- [Backup](#backup) - - [Backup in the default location with encryption](#backup-in-the-default-location-with-encryption) - - [Recurring backup in the default location](#recurring-backup-in-the-default-location) - - [Encrypted recurring backup in the default location](#encrypted-recurring-backup-in-the-default-location) - - [Encrypted backup in Minio](#encrypted-backup-in-minio) - - [Backup in S3 using AWS credential secret](#backup-in-s3-using-aws-credential-secret) - - [Recurring backup in S3 using AWS credential secret](#recurring-backup-in-s3-using-aws-credential-secret) - - [Backup from EC2 nodes with IAM permission to access S3](#backup-from-ec2-nodes-with-iam-permission-to-access-s3) -- [Restore](#restore) - - [Restore using the default backup file location](#restore-using-the-default-backup-file-location) - - [Restore for Rancher migration](#restore-for-rancher-migration) - - [Restore from encrypted backup](#restore-from-encrypted-backup) - - [Restore an encrypted backup from Minio](#restore-an-encrypted-backup-from-minio) - - [Restore from backup using an AWS credential secret to access S3](#restore-from-backup-using-an-aws-credential-secret-to-access-s3) - - [Restore from EC2 nodes with IAM permissions to access S3](#restore-from-ec2-nodes-with-iam-permissions-to-access-s3) -- [Example Credential Secret for Storing Backups in S3](#example-credential-secret-for-storing-backups-in-s3) -- [Example EncryptionConfiguration](#example-encryptionconfiguration) - -# Backup - -This section contains example Backup custom resources. - -### Backup in the Default Location with Encryption - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-location-encrypted-backup -spec: - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Recurring Backup in the Default Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-location-recurring-backup -spec: - resourceSetName: rancher-resource-set - schedule: "@every 1h" - retentionCount: 10 -``` - -### Encrypted Recurring Backup in the Default Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-enc-recurring-backup -spec: - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 3 -``` - -### Encrypted Backup in Minio - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: minio-backup -spec: - storageLocation: - s3: - credentialSecretName: minio-creds - credentialSecretNamespace: default - bucketName: rancherbackups - endpoint: minio.sslip.io - endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Backup in S3 Using AWS Credential Secret - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-backup -spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Recurring Backup in S3 Using AWS Credential Secret - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-recurring-backup -spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: 
rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 10 -``` - -### Backup from EC2 Nodes with IAM Permission to Access S3 - -This example shows that the AWS credential secret does not have to be provided to create a backup if the nodes running `rancher-backup` have [these permissions for access to S3.](../configuration/backup-config/#iam-permissions-for-ec2-nodes-to-access-s3) - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-iam-backup -spec: - storageLocation: - s3: - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -# Restore - -This section contains example Restore custom resources. - -### Restore Using the Default Backup File Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-default -spec: - backupFilename: default-location-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-29-54-07-00.tar.gz -# encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore for Rancher Migration -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-migration -spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - prune: false - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - -### Restore from Encrypted Backup - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-encrypted -spec: - backupFilename: default-test-s3-def-backup-c583d8f2-6daf-4648-8ead-ed826c591471-2020-08-24T20-47-05Z.tar.gz - encryptionConfigSecretName: encryptionconfig -``` - -### Restore an Encrypted Backup from Minio - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-minio -spec: - backupFilename: default-minio-backup-demo-aa5c04b7-4dba-4c48-9ac4-ab7916812eaa-2020-08-30T13-18-17-07-00.tar.gz - storageLocation: - s3: - credentialSecretName: minio-creds - credentialSecretNamespace: default - bucketName: rancherbackups - endpoint: minio.sslip.io - endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t - encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore from Backup Using an AWS Credential Secret to Access S3 - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-s3-demo -spec: - backupFilename: test-s3-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-49-34-07-00.tar.gz.enc - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore from EC2 Nodes with IAM Permissions to Access S3 - -This example shows that the AWS credential secret does not have to be provided to restore from backup if the nodes running `rancher-backup` have [these permissions for access to S3.](../configuration/backup-config/#iam-permissions-for-ec2-nodes-to-access-s3) - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-s3-demo -spec: - 
backupFilename: default-test-s3-recurring-backup-84bf8dd8-0ef3-4240-8ad1-fc7ec308e216-2020-08-24T10#52#44-07#00.tar.gz - storageLocation: - s3: - bucketName: rajashree-backup-test - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - encryptionConfigSecretName: test-encryptionconfig -``` - -# Example Credential Secret for Storing Backups in S3 - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: creds -type: Opaque -data: - accessKey: - secretKey: -``` - -# Example EncryptionConfiguration - -```yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aesgcm: - keys: - - name: key1 - secret: c2VjcmV0IGlzIHNlY3VyZQ== - - name: key2 - secret: dGhpcyBpcyBwYXNzd29yZA== - - aescbc: - keys: - - name: key1 - secret: c2VjcmV0IGlzIHNlY3VyZQ== - - name: key2 - secret: dGhpcyBpcyBwYXNzd29yZA== - - secretbox: - keys: - - name: key1 - secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= -``` diff --git a/content/rancher/v2.x/en/backups/v2.5/migrating-rancher/_index.md b/content/rancher/v2.x/en/backups/v2.5/migrating-rancher/_index.md deleted file mode 100644 index 2543b4db5..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/migrating-rancher/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Migrating Rancher to a New Cluster -weight: 3 -aliases: - - /rancher/v2.x/en/backups/migrating-rancher ---- - -If you are migrating Rancher to a new Kubernetes cluster, you don't need to install Rancher on the new cluster first. If Rancher is restored to a new cluster with Rancher already installed, it can cause problems. - -### Prerequisites - -These instructions assume you have [created a backup](../back-up-rancher) and you have already installed a new Kubernetes cluster where Rancher will be deployed. - -It is required to use the same hostname that was set as the server URL in the first cluster. - -Rancher version must be v2.5.0 and up - -Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes clusters such as Amazon EKS clusters. For help installing Kubernetes, refer to the documentation of the Kubernetes distribution. One of Rancher's Kubernetes distributions may also be used: - -- [RKE Kubernetes installation docs]({{}}/rke/latest/en/installation/) -- [K3s Kubernetes installation docs]({{}}/k3s/latest/en/installation/) - -### 1. Install the rancher-backup Helm chart -``` -helm repo add rancher-charts https://charts.rancher.io -helm repo update -helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace -helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system -``` - -### 2. Restore from backup using a Restore custom resource - -If you are using an S3 store as the backup source, and need to use your S3 credentials for restore, create a secret in this cluster using your S3 credentials. The Secret data must have two keys, `accessKey` and `secretKey` containing the s3 credentials like this: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: s3-creds -type: Opaque -data: - accessKey: - secretKey: -``` - -This secret can be created in any namespace, with the above example it will get created in the default namespace - -In the Restore custom resource, `prune` must be set to false. 
- -Create a Restore custom resource like the example below: - -```yaml -# migrationResource.yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-migration -spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - prune: false - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: backup-test - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - -> **Important:** The field `encryptionConfigSecretName` must be set only if your backup was created with encryption enabled. Provide the name of the Secret containing the encryption config file. If you only have the encryption config file, but don't have a secret created with it in this cluster, use the following steps to create the secret: -1. The encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. So save your `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: - -``` -kubectl create secret generic encryptionconfig \ - --from-file=./encryption-provider-config.yaml \ - -n cattle-resources-system -``` - -Then apply the resource: - -``` -kubectl apply -f migrationResource.yaml -``` - -### 3. Install cert-manager - -Follow the steps to [install cert-manager]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/#5-install-cert-manager) in the documentation about installing cert-manager on Kubernetes. - -### 4. Bring up Rancher with Helm - -Use the same version of Helm to install Rancher, that was used on the first cluster. - -``` -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname= \ -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/backups/v2.5/restoring-rancher/_index.md b/content/rancher/v2.x/en/backups/v2.5/restoring-rancher/_index.md deleted file mode 100644 index e7cbfbd41..000000000 --- a/content/rancher/v2.x/en/backups/v2.5/restoring-rancher/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Restoring Rancher -weight: 2 -aliases: - - /rancher/v2.x/en/installation/backups/restores - - /rancher/v2.x/en/backups/restoring-rancher ---- - -A restore is performed by creating a Restore custom resource. - -> **Important** -> -> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.](../migrating-rancher) -> * While restoring rancher on the same setup, the operator will scale down the rancher deployment when restore starts, and it will scale back up the deployment once restore completes. So Rancher will be unavailable during the restore. -> * When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. - -### Create the Restore Custom Resource - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** -1. Click **Restore.** -1. Create the Restore with the form, or with YAML. For creating the Restore resource using form, refer to the [ [configuration reference]({{}}/rancher/v2.5/en/backups/v2.5/configuration/restore-config/) and to the [examples.]({{}}/rancher/v2.5/en/backups/v2.5/examples/) -1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Restore YAML. 
- - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Restore - metadata: - name: restore-migration - spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - ``` - - For help configuring the Restore, refer to the [configuration reference]({{}}/rancher/v2.5/en/backups/v2.5/configuration/restore-config/) and to the [examples.]({{}}/rancher/v2.5/en/backups/v2.5/examples/) - -1. Click **Create.** - -**Result:** The rancher-operator scales down the rancher deployment during restore, and scales it back up once the restore completes. The resources are restored in this order: - -1. Custom Resource Definitions (CRDs) -2. Cluster-scoped resources -3. Namespaced resources - -### Logs - -To check how the restore is progressing, you can check the logs of the operator. Run this command to follow the logs: - -``` -kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f -``` - -### Cleanup - -If you created the restore resource with kubectl, remove the resource to prevent a naming conflict with future restores. diff --git a/content/rancher/v2.x/en/best-practices/_index.md b/content/rancher/v2.x/en/best-practices/_index.md deleted file mode 100644 index 8e90b638a..000000000 --- a/content/rancher/v2.x/en/best-practices/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Best Practices Guide -weight: 4 ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. - -If you are using Rancher v2.0-v2.4, refer to the Best Practices Guide [here.](./v2.0-v2.4) - -If you are using Rancher v2.5, refer to the Best Practices Guide [here.](./v2.5) \ No newline at end of file diff --git a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/_index.md b/content/rancher/v2.x/en/best-practices/v2.0-v2.4/_index.md deleted file mode 100644 index 712e6daaf..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Best Practices Guide for Rancher v2.0-v2.4 -shortTitle: v2.0-v2.4 -weight: 2 ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. - -If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. - -Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. 
- -For more guidance on best practices, you can consult these resources: - -- [Security]({{}}/rancher/v2.x/en/security/) -- [Rancher Blog](https://rancher.com/blog/) - - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) -- [Rancher Forum](https://forums.rancher.com/) -- [Rancher Users Slack](https://slack.rancher.io/) -- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/containers/_index.md b/content/rancher/v2.x/en/best-practices/v2.0-v2.4/containers/_index.md deleted file mode 100644 index f75f04923..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/containers/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Tips for Setting Up Containers -weight: 100 -aliases: - - /rancher/v2.x/en/best-practices/containers ---- - -Running well-built containers can greatly impact the overall performance and security of your environment. - -Below are a few tips for setting up your containers. - -For a more detailed discussion of security for containers, you can also refer to Rancher's [Guide to Container Security.](https://rancher.com/complete-guide-container-security) - -### Use a Common Container OS - -When possible, you should try to standardize on a common container base OS. - -Smaller distributions such as Alpine and BusyBox reduce container image size and generally have a smaller attack/vulnerability surface. - -Popular distributions such as Ubuntu, Fedora, and CentOS are more field-tested and offer more functionality. - -### Start with a FROM scratch container -If your microservice is a standalone static binary, you should use a FROM scratch container. - -The FROM scratch container is an [official Docker image](https://hub.docker.com/_/scratch) that is empty so that you can use it to design minimal images. - -This will have the smallest attack surface and smallest image size. - -### Run Container Processes as Unprivileged -When possible, use a non-privileged user when running processes within your container. While container runtimes provide isolation, vulnerabilities and attacks are still possible. Inadvertent or accidental host mounts can also be impacted if the container is running as root. For details on configuring a security context for a pod or container, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - -### Define Resource Limits -Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and avoid a malfunctioning microservice from impacting other microservices. - -In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the Rancher docs. - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
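The namespace-level default mentioned above can be expressed in plain Kubernetes with a `LimitRange`. The sketch below is illustrative only (the namespace name and values are placeholders); it applies default limits and requests to any container in the namespace that does not declare its own:

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: container-defaults
  namespace: example-namespace
spec:
  limits:
    - type: Container
      default:          # used as the limit when a container does not set one
        cpu: 500m
        memory: 256Mi
      defaultRequest:   # used as the request when a container does not set one
        cpu: 100m
        memory: 128Mi
```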
- -The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the namespace level. - -### Define Resource Requirements -You should apply CPU and memory requirements to your pods. This is crucial for informing the scheduler which type of compute node your pod needs to be placed on, and ensuring it does not over-provision that node. In Kubernetes, you can set a resource requirement by defining `resources.requests` in the resource requests field in a pod's container spec. For details, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). - -> **Note:** If you set a resource limit for the namespace that the pod is deployed in, and the container doesn't have a specific resource request, the pod will not be allowed to start. To avoid setting these fields on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -It is recommended to define resource requirements on the container level because otherwise, the scheduler makes assumptions that will likely not be helpful to your application when the cluster experiences load. - -### Liveness and Readiness Probes -Set up liveness and readiness probes for your container. Unless your container completely crashes, Kubernetes will not know it's unhealthy unless you create an endpoint or mechanism that can report container status. Alternatively, make sure your container halts and crashes if unhealthy. - -The Kubernetes docs show how to [configure liveness and readiness probes for containers.](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) diff --git a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/_index.md b/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/_index.md deleted file mode 100644 index e142a45fe..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Rancher Deployment Strategies -weight: 100 -aliases: - - /rancher/v2.x/en/best-practices/deployment-strategies ---- - -There are two recommended deployment strategies. Each one has its own pros and cons. Read more about which one would fit best for your use case: - -* [Hub and Spoke](#hub-and-spoke) -* [Regional](#regional) - -# Hub & Spoke Strategy ---- - -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. - -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} - -### Pros - -* Environments could have nodes and network connectivity across regions. -* Single control plane interface to view/see all regions and environments. -* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. - -### Cons - -* Subject to network latencies. -* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. 
- -# Regional Strategy ---- -In the regional deployment model a control plane is deployed in close proximity to the compute nodes. - -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} - -### Pros - -* Rancher functionality in regions stay operational if a control plane in another region goes down. -* Network latency is greatly reduced, improving the performance of functionality in Rancher. -* Upgrades of the Rancher control plane can be done independently per region. - -### Cons - -* Overhead of managing multiple Rancher installations. -* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. -* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/_index.md b/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/_index.md deleted file mode 100644 index 414079641..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Tips for Running Rancher -weight: 100 -aliases: - - /rancher/v2.x/en/best-practices/deployment-types ---- - -A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. - -When you set up your high-availability Rancher installation, consider the following: - -### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. - -### Don't Run Rancher on a Hosted Kubernetes Environment -When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE]({{}}/rke/latest/en/etcd-snapshots/) or [Rancher]({{}}/rancher/v2.x/en/backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. - -### Make sure nodes are configured correctly for Kubernetes ### -It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) - -### When using RKE: Backup the Statefile -RKE keeps record of the cluster state in a file called `cluster.rkestate`. 
This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. - -### Run All Nodes in the Cluster in the Same Datacenter -For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. - -### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. - -### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. - -However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. - -After you [enable monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. - diff --git a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/management/_index.md b/content/rancher/v2.x/en/best-practices/v2.0-v2.4/management/_index.md deleted file mode 100644 index f6c12740d..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.0-v2.4/management/_index.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: Tips for Scaling, Security and Reliability -weight: 101 -aliases: - - /v2.x/en/best-practices/management ---- - -Rancher allows you to set up numerous combinations of configurations. Some configurations are more appropriate for development and testing, while there are other best practices for production environments for maximum availability and fault tolerance. The following best practices should be followed for production. - -- [Tips for Preventing and Handling Problems](#tips-for-preventing-and-handling-problems) -- [Network Topology](#network-topology) -- [Tips for Scaling and Reliability](#tips-for-scaling-and-reliability) -- [Tips for Security](#tips-for-security) -- [Tips for Multi-Tenant Clusters](#tips-for-multi-tenant-clusters) -- [Class of Service and Kubernetes Clusters](#class-of-service-and-kubernetes-clusters) -- [Network Security](#network-security) - -# Tips for Preventing and Handling Problems - -These tips can help you solve problems before they happen. 
- -### Run Rancher on a Supported OS and Supported Docker Version -Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{}}/rancher/v2.x/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. - -### Upgrade Your Kubernetes Version -Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). - -Rancher’s SLAs are not community dependent, but as Kubernetes is a community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. - -### Kill Pods Randomly During Testing -Run chaoskube or a similar mechanism to randomly kill pods in your test environment. This will test the resiliency of your infrastructure and the ability of Kubernetes to self-heal. It's not recommended to run this in your production environment. - -### Deploy Complicated Clusters with Terraform -Rancher's "Add Cluster" UI is preferable for getting started with Kubernetes cluster orchestration or for simple use cases. However, for more complex or demanding use cases, it is recommended to use a CLI/API driven approach. [Terraform](https://www.terraform.io/) is recommended as the tooling to implement this. When you use Terraform with version control and a CI/CD environment, you can have high assurances of consistency and reliability when deploying Kubernetes clusters. This approach also gives you the most customization options. - -Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2-terraform-provider/) for working with Rancher 2.0 Kubernetes. It is called the [Rancher2 Provider.](https://www.terraform.io/docs/providers/rancher2/index.html) - -### Upgrade Rancher in a Staging Environment -All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. - -### Renew Certificates Before they Expire -Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{}}/rancher/v2.x/en/cluster-admin/tools/) to track certificate expiration. - -Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. - -Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/). - -### Enable Recurring Snapshots for Backing up and Restoring the Cluster -Make sure etcd recurring snapshots are enabled. 
Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation]({{}}/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups]({{}}/rancher/v2.x/en/backups/). - -### Provision Clusters with Rancher -When possible, use Rancher to provision your Kubernetes cluster rather than importing a cluster. This will ensure the best compatibility and supportability. - -### Use Stable and Supported Rancher Versions for Production -Do not upgrade production environments to alpha, beta, release candidate (rc), or "latest" versions. These early releases are often not stable and may not have a future upgrade path. - -When installing or upgrading a non-production environment to an early release, anticipate problems such as features not working, data loss, outages, and inability to upgrade without a reinstall. - -Make sure the feature version you are upgrading to is considered "stable" as determined by Rancher. Use the beta, release candidate, and "latest" versions in a testing, development, or demo environment to try out new features. Feature version upgrades, for example 2.1.x to 2.2.x, should be considered as and when they are released. Some bug fixes and most features are not back ported into older versions. - -Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. - -For more detail on what happens during the Rancher product lifecycle, refer to the [Support Maintenance Terms](https://rancher.com/support-maintenance-terms/). - -# Network Topology -These tips can help Rancher work more smoothly with your network. - -### Use Low-latency Networks for Communication Within Clusters -Kubernetes clusters are best served by low-latency networks. This is especially true for the control plane components and etcd, where lots of coordination and leader election traffic occurs. Networking between Rancher server and the Kubernetes clusters it manages are more tolerant of latency. - -### Allow Rancher to Communicate Directly with Clusters -Limit the use of proxies or load balancers between Rancher server and Kubernetes clusters. As Rancher is maintaining a long-lived web sockets connection, these intermediaries can interfere with the connection lifecycle as they often weren't configured with this use case in mind. - - -# Tips for Scaling and Reliability -These tips can help you scale your cluster more easily. - -### Use One Kubernetes Role Per Host -Separate the etcd, control plane, and worker roles onto different hosts. Don't assign multiple roles to the same host, such as a worker and control plane. This will give you maximum scalability. - -### Run the Control Plane and etcd on Virtual Machines -Run your etcd and control plane nodes on virtual machines where you can scale vCPU and memory easily if needed in the future. - -### Use at Least Three etcd Nodes -Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the majority of nodes, therefore it is not recommended to have clusters of even numbers. Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Use at Least Two Control Plane Nodes -Provision two or more control plane nodes. 
Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. - -### Monitor Your Cluster -Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. - - -# Tips for Security -Below are some basic tips for increasing security in Rancher. For more detailed information about securing your cluster, you can refer to these resources: - -- Rancher's [security documentation and Kubernetes cluster hardening guide]({{}}/rancher/v2.x/en/security/) -- [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) - -### Update Rancher with Security Patches -Keep your Rancher installation up to date with the latest patches. Patch updates have important software fixes and sometimes have security fixes. When patches with security fixes are released, customers with Rancher licenses are notified by e-mail. These updates are also posted on Rancher's [forum](https://forums.rancher.com/). - -### Report Security Issues Directly to Rancher -If you believe you have uncovered a security-related problem in Rancher, please communicate this immediately and discretely to the Rancher team (security@rancher.com). Posting security issues on public forums such as Twitter, Rancher Slack, GitHub, etc. can potentially compromise security for all Rancher customers. Reporting security issues discretely allows Rancher to assess and mitigate the problem. Security patches are typically given high priority and released as quickly as possible. - -### Only Upgrade One Component at a Time -In addition to Rancher software updates, closely monitor security fixes for related software, such as Docker, Linux, and any libraries used by your workloads. For production environments, try to avoid upgrading too many entities during a single maintenance window. Upgrading multiple components can make it difficult to root cause an issue in the event of a failure. As business requirements allow, upgrade one component at a time. - -# Tips for Multi-Tenant Clusters - -### Namespaces -Each tenant should have their own unique namespaces within the cluster. This avoids naming conflicts and allows resources to be only visible to their owner through use of RBAC policy - -### Project Isolation -Use Rancher's Project Isolation to automatically generate Network Policy between Projects (sets of Namespaces). This further protects workloads from interference - -### Resource Limits -Enforce use of sane resource limit definitions for every deployment in your cluster. This not only protects the owners of the deployment, but the neighboring resources from other tenants as well. Remember, namespaces do not isolate at the node level, so over-consumption of resources on a node affects other namespace deployments. Admission controllers can be written to require resource limit definitions - -### Resource Requirements -Enforce use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to appropriately schedule workloads. 
Otherwise you will eventually end up with over-provisioned nodes. - -# Class of Service and Kubernetes Clusters -A class of service describes the expectations around cluster uptime, durability, and duration of maintenance windows. Typically, organizations group these characteristics into labels such as "dev" or "prod". - -### Consider fault domains -Kubernetes clusters can span multiple classes of service; however, it is important to consider the ability of one workload to affect another. Without proper deployment practices such as resource limits, requirements, etc., a deployment that is not behaving well has the potential to impact the health of the cluster. In a "dev" environment it is common for end-users to exercise less caution with deployments, thus increasing the chance of such behavior. Sharing this behavior with your production workload increases risk. - -### Upgrade risks -Upgrades of Kubernetes are not without risk. The best way to predict the outcome of an upgrade is to try it on a cluster with a load and use case similar to your production cluster. This is where having non-prod class of service clusters can be advantageous. - -### Resource Efficiency -Clusters can be built with varying degrees of redundancy. In a class of service with low expectations for uptime, resources and cost can be conserved by building clusters without redundant Kubernetes control components. This approach may also free up more budget/resources to increase the redundancy at the production level. - -# Network Security -In general, you can use network security best practices in your Rancher and Kubernetes clusters. Consider the following: - -### Use a Firewall Between your Hosts and the Internet -Firewalls should be used between your hosts and the Internet (or corporate Intranet). This could be enterprise firewall appliances in a datacenter or SDN constructs in the cloud, such as VPCs, security groups, ingress, and egress rules. Try to limit inbound access only to ports and IP addresses that require it. Outbound access can be shut off (air gap) if the environment contains sensitive information that requires this restriction. If available, use firewalls with intrusion detection and DDoS prevention. - -### Run Periodic Security Scans -Run security and penetration scans on your environment periodically. Even with well-designed infrastructure, a poorly designed microservice could compromise the entire environment. diff --git a/content/rancher/v2.x/en/best-practices/v2.5/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/_index.md deleted file mode 100644 index ffdc1777b..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Best Practices Guide for Rancher v2.5 -shortTitle: v2.5 -weight: 1 ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. - -If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. - -Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server.
- -For more guidance on best practices, you can consult these resources: - -- [Security]({{}}/rancher/v2.x/en/security/) -- [Rancher Blog](https://rancher.com/blog/) - - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) -- [Rancher Forum](https://forums.rancher.com/) -- [Rancher Users Slack](https://slack.rancher.io/) -- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/_index.md deleted file mode 100644 index 9d3d86d1f..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Best Practices for Rancher Managed Clusters -shortTitle: Rancher Managed Clusters -weight: 2 ---- - -### Logging - -Refer to [this guide](./logging) for our recommendations for cluster-level logging and application logging. - -### Monitoring - -Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. Refer to this [guide](./monitoring) for our recommendations. - -### Tips for Setting Up Containers - -Running well-built containers can greatly impact the overall performance and security of your environment. Refer to this [guide](./containers) for tips. - -### Best Practices for Rancher Managed vSphere Clusters - -This [guide](./managed-vsphere) outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/containers/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/containers/_index.md deleted file mode 100644 index f75f04923..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/containers/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Tips for Setting Up Containers -weight: 100 -aliases: - - /rancher/v2.x/en/best-practices/containers ---- - -Running well-built containers can greatly impact the overall performance and security of your environment. - -Below are a few tips for setting up your containers. - -For a more detailed discussion of security for containers, you can also refer to Rancher's [Guide to Container Security.](https://rancher.com/complete-guide-container-security) - -### Use a Common Container OS - -When possible, you should try to standardize on a common container base OS. - -Smaller distributions such as Alpine and BusyBox reduce container image size and generally have a smaller attack/vulnerability surface. - -Popular distributions such as Ubuntu, Fedora, and CentOS are more field-tested and offer more functionality. - -### Start with a FROM scratch container -If your microservice is a standalone static binary, you should use a FROM scratch container. - -The FROM scratch container is an [official Docker image](https://hub.docker.com/_/scratch) that is empty so that you can use it to design minimal images. - -This will have the smallest attack surface and smallest image size. - -### Run Container Processes as Unprivileged -When possible, use a non-privileged user when running processes within your container. 
While container runtimes provide isolation, vulnerabilities and attacks are still possible. Inadvertent or accidental host mounts can also be impacted if the container is running as root. For details on configuring a security context for a pod or container, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - -### Define Resource Limits -Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and prevent a malfunctioning microservice from impacting other microservices. - -In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the Rancher docs. - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the namespace level. - -### Define Resource Requirements -You should apply CPU and memory requirements to your pods. This is crucial for informing the scheduler which type of compute node your pod needs to be placed on, and for ensuring it does not over-provision that node. In Kubernetes, you can set a resource requirement by defining `resources.requests` in a pod's container spec. For details, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). - -> **Note:** If you set a resource limit for the namespace that the pod is deployed in, and the container doesn't have a specific resource request, the pod will not be allowed to start. To avoid setting these fields on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -It is recommended to define resource requirements at the container level because otherwise, the scheduler makes assumptions that will likely not be helpful to your application when the cluster is under load. - -### Liveness and Readiness Probes -Set up liveness and readiness probes for your container. Unless your container crashes completely, Kubernetes will not know it is unhealthy until you provide an endpoint or mechanism that can report container status. Alternatively, make sure your container halts and crashes if unhealthy.
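To show how the recommendations above (unprivileged processes, resource requests/limits, and probes) fit together, here is a minimal sketch of a pod manifest. It is illustrative only: the image, user ID, port, path, and resource values are placeholder assumptions, not values prescribed by this guide.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-app                              # hypothetical workload name
spec:
  containers:
    - name: example-app
      image: registry.example.com/example-app:1.0   # placeholder image
      securityContext:
        runAsNonRoot: true                       # run the process as an unprivileged user
        runAsUser: 1000                          # placeholder non-root UID
      resources:
        requests:                                # tells the scheduler what the pod needs
          cpu: 100m
          memory: 128Mi
        limits:                                  # protects neighbors from a runaway container
          cpu: 500m
          memory: 256Mi
      readinessProbe:                            # only route traffic when the app reports ready
        httpGet:
          path: /healthz                         # placeholder health endpoint
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
      livenessProbe:                             # restart the container if it stops responding
        httpGet:
          path: /healthz
          port: 8080
        initialDelaySeconds: 15
        periodSeconds: 20
```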
- -The Kubernetes docs show how to [configure liveness and readiness probes for containers.](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/logging/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/logging/_index.md deleted file mode 100644 index 3e9cf02ad..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/logging/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Logging Best Practices -weight: 1 ---- -In this guide, we recommend best practices for cluster-level logging and application logging. - -- [Changes in Logging in Rancher v2.5](#changes-in-logging-in-rancher-v2-5) -- [Cluster-level Logging](#cluster-level-logging) -- [Application Logging](#application-logging) -- [General Best Practices](#general-best-practices) - -# Changes in Logging in Rancher v2.5 - -Before Rancher v2.5, logging in Rancher was a fairly static integration. There was a fixed list of aggregators to choose from (Elasticsearch, Splunk, Kafka, Fluentd and Syslog), and only two configuration points to choose from (Cluster-level and Project-level). - -Logging in 2.5 has been completely overhauled to provide a more flexible experience for log aggregation. With the new logging feature, administrators and users alike can deploy logging that meets fine-grained collection criteria while offering a wider array of destinations and configuration options. - -Under the hood, Rancher logging uses the Banzai Cloud logging operator. We provide manageability of this operator (and its resources), and tie that experience in with managing your Rancher clusters. - -# Cluster-level Logging - -### Cluster-wide Scraping - -For some users, it is desirable to scrape logs from every container running in the cluster. This usually coincides with your security team's request (or requirement) to collect all logs from all points of execution. - -In this scenario, it is recommended to create at least two _ClusterOutput_ objects - one for your security team (if you have that requirement), and one for yourselves, the cluster administrators. When creating these objects, take care to choose an output endpoint that can handle the significant log traffic coming from the entire cluster. Also make sure to choose an appropriate index to receive all these logs. - -Once you have created these _ClusterOutput_ objects, create a _ClusterFlow_ to collect all the logs. Do not define any _Include_ or _Exclude_ rules on this flow. This will ensure that all logs from across the cluster are collected. If you have two _ClusterOutputs_, make sure to send logs to both of them. - -### Kubernetes Components - -_ClusterFlows_ have the ability to collect logs from all containers on all hosts in the Kubernetes cluster. This works well in cases where those containers are part of a Kubernetes pod; however, RKE containers exist outside of the scope of Kubernetes. - -Currently (as of v2.5.1), the logs from RKE containers are collected but cannot easily be filtered. This is because those logs do not contain information as to the source container (e.g. `etcd` or `kube-apiserver`). - -A future release of Rancher will include the source container name, which will enable filtering of these component logs. Once that change is made, you will be able to customize a _ClusterFlow_ to retrieve **only** the Kubernetes component logs, and direct them to an appropriate output.
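As a rough sketch of the cluster-wide collection described above, the manifests below define one _ClusterOutput_ and a catch-all _ClusterFlow_ that references it. The Elasticsearch endpoint, index name, and object names are placeholder assumptions, and the exact reference field (`globalOutputRefs` here, `outputRefs` in older operator versions) depends on the Banzai Cloud logging operator version shipped with your Rancher release.

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: cluster-admin-output            # hypothetical name
  namespace: cattle-logging-system
spec:
  elasticsearch:
    host: elasticsearch.example.com     # placeholder endpoint
    port: 9200
    index_name: all-cluster-logs        # pick an index sized for cluster-wide traffic
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: all-logs
  namespace: cattle-logging-system
spec:
  # no match/include/exclude rules, so logs from every container are collected
  globalOutputRefs:
    - cluster-admin-output
```

If you also maintain a second _ClusterOutput_ for your security team, add it to the same reference list so both destinations receive the full log stream.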
- -# Application Logging - -Best practice, not only in Kubernetes but in all container-based applications, is to direct application logs to `stdout`/`stderr`. The container runtime will then trap these logs and do **something** with them - typically writing them to a file. Depending on the container runtime (and its configuration), these logs can end up in any number of locations. - -In the case of writing the logs to a file, Kubernetes helps by creating a `/var/log/containers` directory on each host. This directory symlinks the log files to their actual destination (which can differ based on configuration or container runtime). - -Rancher logging will read all log entries in `/var/log/containers`, ensuring that all log entries from all containers (assuming a default configuration) will have the opportunity to be collected and processed. - -### Specific Log Files - -Log collection only retrieves `stdout`/`stderr` logs from pods in Kubernetes. But what if we want to collect logs from other files that are generated by applications? Here, a log streaming sidecar (or two) may come in handy. - -The goal of setting up a streaming sidecar is to take log files that are written to disk and have their contents streamed to `stdout`. This way, the Banzai Logging Operator can pick up those logs and send them to your desired output. - -To set this up, edit your workload resource (e.g. Deployment) and add the following sidecar definition: - -``` -... -containers: -- args: - - -F - - /path/to/your/log/file.log - command: - - tail - image: busybox - name: stream-log-file-[name] - volumeMounts: - - mountPath: /path/to/your/log - name: mounted-log -... -``` - -This will add a container to your workload definition that will now stream the contents of (in this example) `/path/to/your/log/file.log` to `stdout`. - -This log stream is then automatically collected according to any _Flows_ or _ClusterFlows_ you have set up. You may also wish to consider creating a _Flow_ specifically for this log file by targeting the name of the container. See example: - -``` -... -spec: - match: - - select: - container_names: - - stream-log-file-name -... -``` - - -# General Best Practices - -- Where possible, output structured log entries (e.g. `syslog`, JSON). This makes handling of the log entry easier as there are already parsers written for these formats. -- Try to provide the name of the application that is creating the log entry in the entry itself. This can make troubleshooting easier, as Kubernetes objects do not always carry the name of the application as the object name. For instance, a pod ID may be something like `myapp-098kjhsdf098sdf98`, which does not provide much information about the application running inside the container. -- Except in the case of collecting all logs cluster-wide, try to scope your _Flow_ and _ClusterFlow_ objects tightly. This makes it easier to troubleshoot when problems arise, and also helps ensure unrelated log entries do not show up in your aggregator. An example of tight scoping would be to constrain a _Flow_ to a single _Deployment_ in a namespace, or perhaps even a single container within a _Pod_. -- Keep the log verbosity down except when troubleshooting. High log verbosity poses a number of issues, chief among them being **noise**: significant events can be drowned out in a sea of `DEBUG` messages. This is somewhat mitigated with automated alerting and scripting, but highly verbose logging still places an inordinate amount of stress on the logging infrastructure.
-- Where possible, try to provide a transaction or request ID with the log entry. This can make tracing application activity across multiple log sources easier, especially when dealing with distributed applications. \ No newline at end of file diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/_index.md deleted file mode 100644 index fa936c404..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/_index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Best Practices for Rancher Managed vSphere Clusters -shortTitle: Rancher Managed Clusters in vSphere ---- - -This guide outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. - -- [1. VM Considerations](#1-vm-considerations) -- [2. Network Considerations](#2-network-considerations) -- [3. Storage Considerations](#3-storage-considerations) -- [4. Backups and Disaster Recovery](#4-backups-and-disaster-recovery) - -
- -![Solution Overview](/img/rancher/solution_overview.drawio.svg) - -# 1. VM Considerations - -### Leverage VM Templates to Construct the Environment - -To facilitate consistency across the Virtual Machines deployed in the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customisation options. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across ESXi Hosts - -Doing so will ensure node VMs are spread across multiple ESXi hosts - preventing a single point of failure at the host level. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across Datastores - -Doing so will ensure node VMs are spread across multiple datastores - preventing a single point of failure at the datastore level. - -### Configure VMs as Appropriate for Kubernetes - -It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. - -# 2. Network Considerations - -### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes - -Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. - -### Consistent IP Addressing for VMs - -Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -# 3. Storage Considerations - -### Leverage SSD Drives for ETCD Nodes - -ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. - -# 4. Backups and Disaster Recovery - -### Perform Regular Downstream Cluster Backups - -Kubernetes uses etcd to store all its data - from configuration, state and metadata. Backing this up is crucial in the event of disaster recovery. - -### Back up Downstream Node VMs - -Incorporate the Rancher downstream node VMs within a standard VM backup policy. \ No newline at end of file diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/_index.md deleted file mode 100644 index 72f74e3f4..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Monitoring Best Practices -weight: 2 ---- - -Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. This is no different when using Kubernetes and Rancher. Fortunately, the integrated monitoring and alerting functionality makes this whole process a lot easier. - -The [Rancher Documentation]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/) describes in detail how you can set up a complete Prometheus and Grafana stack. Out of the box, this will scrape monitoring data from all system and Kubernetes components in your cluster and provide sensible dashboards and alerts to get you started. But for a reliable setup, you also need to monitor your own workloads and adapt Prometheus and Grafana to your own specific use cases and cluster sizes.
This document aims to give you best practices for this. - -- [What to Monitor](#what-to-monitor) -- [Configuring Prometheus Resource Usage](#configuring-prometheus-resource-usage) -- [Scraping Custom Workloads](#scraping-custom-workloads) -- [Monitoring in a (Micro)Service Architecture](#monitoring-in-a-micro-service-architecture) -- [Real User Monitoring](#real-user-monitoring) -- [Security Monitoring](#security-monitoring) -- [Setting up Alerts](#setting-up-alerts) - -# What to Monitor - -Kubernetes itself, as well as applications running inside of it, form a distributed system where different components interact with each other. For the whole system and each individual component, you have to ensure performance, availability, reliability and scalability. A good resource with more details and information is Google's free [Site Reliability Engineering Book](https://landing.google.com/sre/sre-book/), especially the chapter about [Monitoring distributed systems](https://landing.google.com/sre/sre-book/chapters/monitoring-distributed-systems/). - -# Configuring Prometheus Resource Usage - -When installing the integrated monitoring stack, Rancher allows you to configure several settings that are dependent on the size of your cluster and the workloads running in it. This chapter covers these in more detail. - -### Storage and Data Retention - -The amount of storage needed for Prometheus directly correlates to the number of time series and labels that you store and the data retention you have configured. It is important to note that Prometheus is not meant to be used as long-term metrics storage. Data retention time is usually only a couple of days and not weeks or months. The reason for this is that Prometheus does not perform any aggregation on its stored metrics. This is great because aggregation can dilute data, but it also means that the needed storage grows linearly over time without retention. - -One way to calculate the necessary storage is to look at the average size of a storage chunk in Prometheus with this query: - -``` -rate(prometheus_tsdb_compaction_chunk_size_bytes_sum[1h]) / rate(prometheus_tsdb_compaction_chunk_samples_sum[1h]) -``` - -Next, find out your data ingestion rate per second: - -``` -rate(prometheus_tsdb_head_samples_appended_total[1h]) -``` - -and then multiply this with the retention time, adding a few percentage points as buffer: - -``` -average chunk size in bytes * ingestion rate per second * retention time in seconds * 1.1 = necessary storage in bytes -``` - -You can find more information about how to calculate the necessary storage in this [blog post](https://www.robustperception.io/how-much-disk-space-do-prometheus-blocks-use). - -You can read more about the Prometheus storage concept in the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/storage). - -### CPU and Memory Requests and Limits - -In larger Kubernetes clusters, Prometheus can consume quite a bit of memory. The amount of memory Prometheus needs directly correlates to the number of time series and labels it stores and the scrape interval at which they are filled. - -You can find more information about how to calculate the necessary memory in this [blog post](https://www.robustperception.io/how-much-ram-does-prometheus-2-x-need-for-cardinality-and-ingestion). - -The number of CPUs needed correlates with the number of queries you are performing.
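To make the storage formula above concrete, here is a worked example with assumed numbers (an average chunk size of 2 bytes per sample, an ingestion rate of 10,000 samples per second, and 10 days of retention, i.e. 864,000 seconds); your own values from the queries above will differ:

```
2 bytes * 10,000 samples/s * 864,000 s * 1.1 ≈ 19 GB of storage
```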
- -### Federation and Long-term Storage - -Prometheus is not meant to store metrics for long periods of time; it should only be used for short-term storage. - -In order to store some or all metrics for a long time, you can leverage Prometheus' [remote read/write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) capabilities to connect it to storage systems like [Thanos](https://thanos.io/), [InfluxDB](https://www.influxdata.com/), [M3DB](https://www.m3db.io/), or others. You can find an example setup in this [blog post](https://rancher.com/blog/2020/prometheus-metric-federation). - -# Scraping Custom Workloads - -While the integrated Rancher Monitoring already scrapes system metrics from a cluster's nodes and system components, the custom workloads that you deploy on Kubernetes should also be scraped for data. For that, you can configure Prometheus to make an HTTP request to an endpoint of your applications at a certain interval. These endpoints should then return their metrics in a Prometheus format. - -In general, you want to scrape data from all the workloads running in your cluster so that you can use it for alerts or for debugging issues. Often, you recognize that you need some data only when you actually need the metrics during an incident. It is good if it is already scraped and stored. Since Prometheus is only meant to be a short-term metrics storage, scraping and keeping lots of data is usually not that expensive. If you are using a long-term storage solution with Prometheus, you can then still decide which data you are actually persisting and keeping there. - -### About Prometheus Exporters - -A lot of third-party workloads, like databases, queues, or web servers, either already support exposing metrics in a Prometheus format, or there are so-called exporters available that translate between the tool's metrics and the format that Prometheus understands. Usually you can add these exporters as additional sidecar containers to the workload's Pods. A lot of helm charts already include options to deploy the correct exporter. Additionally, you can find a curated list of exporters by Sysdig on [promcat.io](https://promcat.io/) and on [ExporterHub](https://exporterhub.io/). - -### Prometheus support in Programming Languages and Frameworks - -To get your own custom application metrics into Prometheus, you have to collect and expose these metrics directly from your application's code. Fortunately, there are already libraries and integrations available to help with this for most popular programming languages and frameworks. One example of this is the Prometheus support in the [Spring Framework](https://docs.spring.io/spring-metrics/docs/current/public/prometheus). - -### ServiceMonitors and PodMonitors - -Once all your workloads expose metrics in a Prometheus format, you have to configure Prometheus to scrape them. Under the hood, Rancher uses the [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator). This makes it easy to add additional scraping targets with ServiceMonitors and PodMonitors. A lot of helm charts already include an option to create these monitors directly. You can also find more information in the Rancher documentation. - -### Prometheus Push Gateway - -There are some workloads that are traditionally hard for Prometheus to scrape. Examples are short-lived workloads like Jobs and CronJobs, or applications that do not allow sharing data between individually handled incoming requests, like PHP applications.
- -To still get metrics for these use cases, you can set up [prometheus-pushgateways](https://github.com/prometheus/pushgateway). The CronJob or PHP application would push metric updates to the pushgateway. The pushgateway aggregates and exposes them through an HTTP endpoint, which then can be scraped by Prometheus. - -### Prometheus Blackbox Monitor - -Sometimes it is useful to monitor workloads from the outside. For this, you can use the [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter), which allows probing any kind of endpoint over HTTP, HTTPS, DNS, TCP, and ICMP. - -# Monitoring in a (Micro)Service Architecture - -If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic to understand how all these workloads are communicating with each other and where a problem or bottleneck may be. - -Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work-intensive. Service Meshes like Istio, which can be installed with [a click](https://rancher.com/docs/rancher/v2.x/en/cluster-admin/tools/istio/) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. - -# Real User Monitoring - -Monitoring the availability and performance of all your internal workloads is vitally important for running stable, reliable, and fast applications. But these metrics only show you parts of the picture. To get a complete view, it is also necessary to know how your end users are actually perceiving it. For this you can look into various [Real user monitoring solutions](https://en.wikipedia.org/wiki/Real_user_monitoring). - -# Security Monitoring - -In addition to monitoring workloads to detect performance, availability or scalability problems, the cluster and the workloads running in it should also be monitored for potential security problems. A good starting point is to frequently run and alert on [CIS Scans]({{}}/rancher/v2.x/en/cis-scans/v2.5/), which check if the cluster is configured according to security best practices. - -For the workloads, you can have a look at Kubernetes and Container security solutions like [Falco](https://falco.org/), [Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/), and [Sysdig](https://sysdig.com/). - -# Setting up Alerts - -Getting all the metrics into a monitoring system and visualizing them in dashboards is great, but you also want to be proactively alerted if something goes wrong. - -The integrated Rancher monitoring already configures a sensible set of alerts that make sense in any Kubernetes cluster. You should extend these to cover your specific workloads and use cases. - -When setting up alerts, configure them for all the workloads that are critical to the availability of your applications. But also make sure that they are not too noisy. Ideally, every alert you are receiving should be because of a problem that needs your attention and needs to be fixed. If you have alerts that are firing all the time but are not that critical, there is a danger that you start ignoring your alerts altogether and then miss the really important ones. Less may be more here. Start by focusing on the most important metrics first; for example, alert if your application is offline.
Fix all the problems that start to pop up and then start to create more detailed alerts. - -If an alert starts firing, but there is nothing you can do about it at the moment, it's also fine to silence the alert for a certain amount of time, so that you can look at it later. - -You can find more information on how to set up alerts and notification channels in the [Rancher Documentation]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5). \ No newline at end of file diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/_index.md deleted file mode 100644 index 32786386a..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Best Practices for the Rancher Server -shortTitle: Rancher Server -weight: 1 ---- - -This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. - -### Recommended Architecture and Infrastructure - -Refer to this [guide](./deployment-types) for our general advice for setting up the Rancher server on a high-availability Kubernetes cluster. - -### Deployment Strategies - -This [guide](./deployment-strategies) is designed to help you choose whether a regional deployment strategy or a hub-and-spoke deployment strategy is better for a Rancher server that manages downstream Kubernetes clusters. - -### Installing Rancher in a vSphere Environment - -This [guide](./rancher-in-vsphere) outlines a reference architecture for installing Rancher in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. \ No newline at end of file diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/_index.md deleted file mode 100644 index 35a1e08b2..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Rancher Deployment Strategy -weight: 100 ---- - -There are two recommended deployment strategies for a Rancher server that manages downstream Kubernetes clusters. Each one has its own pros and cons. Read more about which one would fit best for your use case: - -* [Hub and Spoke](#hub-and-spoke) -* [Regional](#regional) - -# Hub & Spoke Strategy ---- - -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. - -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} - -### Pros - -* Environments could have nodes and network connectivity across regions. -* Single control plane interface to view/see all regions and environments. -* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. - -### Cons - -* Subject to network latencies. -* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. - -# Regional Strategy ---- -In the regional deployment model a control plane is deployed in close proximity to the compute nodes. 
- -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} - -### Pros - -* Rancher functionality in regions stays operational if a control plane in another region goes down. -* Network latency is greatly reduced, improving the performance of functionality in Rancher. -* Upgrades of the Rancher control plane can be done independently per region. - -### Cons - -* Overhead of managing multiple Rancher installations. -* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. -* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/_index.md deleted file mode 100644 index 6cc5b883f..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Tips for Running Rancher -weight: 100 -aliases: - - /rancher/v2.x/en/best-practices/deployment-types ---- - -This guide is geared toward use cases where Rancher is used to manage downstream Kubernetes clusters. The high-availability setup is intended to prevent losing access to downstream clusters if the Rancher server is not available. - -A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single-node environment. - -If you are installing Rancher in a vSphere environment, refer to the best practices documented [here.](../rancher-in-vsphere) - -When you set up your high-availability Rancher installation, consider the following: - -### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. - -### Make Sure Nodes Are Configured Correctly for Kubernetes -It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with SSD-backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md). - -### When using RKE: Back up the Statefile -RKE keeps a record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing it up. After each run of `rke up`, you should back up the state file. - -### Run All Nodes in the Cluster in the Same Datacenter -For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c.
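As an illustration of the node layout discussed above, here is a minimal sketch of the `nodes` section of an RKE `cluster.yml` for a three-node Rancher server cluster spread across availability zones. The addresses, hostnames, and SSH user are placeholder assumptions; for the Rancher server cluster each node typically carries all three roles, whereas downstream clusters should separate roles onto different hosts.

```yaml
nodes:
  - address: 10.0.1.10               # placeholder node in us-west-2a
    hostname_override: rancher-node-1
    user: ubuntu                     # placeholder SSH user
    role: [controlplane, etcd, worker]
  - address: 10.0.2.10               # placeholder node in us-west-2b
    hostname_override: rancher-node-2
    user: ubuntu
    role: [controlplane, etcd, worker]
  - address: 10.0.3.10               # placeholder node in us-west-2c
    hostname_override: rancher-node-3
    user: ubuntu
    role: [controlplane, etcd, worker]
```

Remember that every `rke up` run against this file updates `cluster.rkestate`, so back up (and encrypt) that file as described above.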
- -### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. - -### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. - -However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. - -After you [enable monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. - diff --git a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/_index.md b/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/_index.md deleted file mode 100644 index 3abadd946..000000000 --- a/content/rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Installing Rancher in a vSphere Environment -shortTitle: On-Premises Rancher in vSphere -weight: 3 ---- - -This guide outlines a reference architecture for installing Rancher on an RKE Kubernetes cluster in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. - -- [1. Load Balancer Considerations](#1-load-balancer-considerations) -- [2. VM Considerations](#2-vm-considerations) -- [3. Network Considerations](#3-network-considerations) -- [4. Storage Considerations](#4-storage-considerations) -- [5. Backups and Disaster Recovery](#5-backups-and-disaster-recovery) - -
- -![Solution Overview](/docs/img/rancher/rancher-on-prem-vsphere.svg) - -# 1. Load Balancer Considerations - -A load balancer is required to direct traffic to the Rancher workloads residing on the RKE nodes. - -### Leverage Fault Tolerance and High Availability - -Leverage the use of an external (hardware or software) load balancer that has inherent high-availability functionality (F5, NSX-T, Keepalived, etc.). - -### Back Up Load Balancer Configuration - -In the event of a Disaster Recovery activity, availability of the load balancer configuration will expedite the recovery process. - -### Configure Health Checks - -Configure the load balancer to automatically mark nodes as unavailable if a health check fails. For example, NGINX can facilitate this with: - -`max_fails=3 fail_timeout=5s` - -### Leverage an External Load Balancer - -Avoid implementing a software load balancer within the management cluster. - -### Secure Access to Rancher - -Configure appropriate firewall / ACL rules to expose access only to Rancher. - -# 2. VM Considerations - -### Size the VMs According to the Rancher Documentation - -https://rancher.com/docs/rancher/v2.x/en/installation/requirements/ - -### Leverage VM Templates to Construct the Environment - -To facilitate consistency across the Virtual Machines deployed in the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customisation options. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across ESXi Hosts - -Doing so will ensure node VMs are spread across multiple ESXi hosts - preventing a single point of failure at the host level. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across Datastores - -Doing so will ensure node VMs are spread across multiple datastores - preventing a single point of failure at the datastore level. - -### Configure VMs as Appropriate for Kubernetes - -It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. - -# 3. Network Considerations - -### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes - -Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup.
diff --git a/content/rancher/v2.x/en/cis-scans/_index.md b/content/rancher/v2.x/en/cis-scans/_index.md deleted file mode 100644 index 15b5988f5..000000000 --- a/content/rancher/v2.x/en/cis-scans/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: CIS Scans -weight: 18 ---- - -_Available as of v2.4.0_ - - -If you are using Rancher v2.5, refer to the CIS scan documentation [here.](./v2.5) - -If you are using Rancher v2.4, refer to the CIS scan documentation [here.](./v2.4) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cis-scans/v2.4/_index.md b/content/rancher/v2.x/en/cis-scans/v2.4/_index.md deleted file mode 100644 index 278ac6341..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.4/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: CIS Scans in Rancher v2.4 (Deprecated) -shortTitle: Rancher v2.4 -weight: 2 -aliases: - - /rancher/v2.x/en/cis-scans/legacy ---- - -_Available as of v2.4.0_ - -This section contains the legacy documentation for the CIS Scan tool that was released in Rancher v2.4, and was available under the **Tools** menu in the top navigation bar of the cluster manager. - -As of Rancher v2.5, it is deprecated and replaced with the `rancher-cis-benchmark` application. - -- [Prerequisites](#prerequisites) -- [Running a scan](#running-a-scan) -- [Scheduling recurring scans](#scheduling-recurring-scans) -- [Skipping tests](#skipping-tests) -- [Setting alerts](#setting-alerts) -- [Deleting a report](#deleting-a-report) -- [Downloading a report](#downloading-a-report) -- [List of skipped and not applicable tests](#list-of-skipped-and-not-applicable-tests) - - -# Prerequisites - -To run security scans on a cluster and access the generated reports, you must be an [Administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [Cluster Owner.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - -Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher. - -The security scan cannot run in a cluster that has Windows nodes. - -You will only be able to see the CIS scan reports for clusters that you have access to. - -# Running a Scan - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Click **Run Scan.** -1. Choose a CIS scan profile. - -**Result:** A report is generated and displayed in the **CIS Scans** page. To see details of the report, click the report's name. - -# Scheduling Recurring Scans - -Recurring scans can be scheduled to run on any RKE Kubernetes cluster. - -To enable recurring scans, edit the advanced options in the cluster configuration during cluster creation or after the cluster has been created. - -To schedule scans for an existing cluster: - -1. Go to the cluster view in Rancher. -1. Click **Tools > CIS Scans.** -1. Click **Add Schedule.** This takes you to the section of the cluster editing page that is applicable to configuring a schedule for CIS scans. (This section can also be reached by going to the cluster view, clicking **⋮ > Edit,** and going to the **Advanced Options.**) -1. In the **CIS Scan Enabled** field, click **Yes.** -1. In the **CIS Scan Profile** field, choose a **Permissive** or **Hardened** profile. The corresponding CIS Benchmark version is included in the profile name. 
Note: Any skipped tests [defined in a separate ConfigMap](#skipping-tests) will be skipped regardless of whether a **Permissive** or **Hardened** profile is selected. When selecting the permissive profile, you should see which tests were skipped by Rancher (tests that are skipped by default for RKE clusters) and which tests were skipped by a Rancher user. In the hardened test profile, the only skipped tests will be those skipped by users. -1. In the **CIS Scan Interval (cron)** field, enter a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to define how often the cluster will be scanned. -1. In the **CIS Scan Report Retention** field, enter the number of past reports that should be kept. - -**Result:** The security scan will run and generate reports at the scheduled intervals. - -The test schedule can be configured in the `cluster.yml`: - -```yaml -scheduled_cluster_scan: -    enabled: true -    scan_config: -        cis_scan_config: -            override_benchmark_version: rke-cis-1.4 -            profile: permissive -    schedule_config: -        cron_schedule: 0 0 * * * -        retention: 24 -``` - - -# Skipping Tests - -You can define a set of tests that will be skipped by the CIS scan when the next report is generated. - -These tests will be skipped for subsequent CIS scans, including both manually triggered and scheduled scans, and the tests will be skipped with any profile. - -The skipped tests will be listed alongside the test profile name in the cluster configuration options when a test profile is selected for a recurring cluster scan. The skipped tests will also be shown every time a scan is triggered manually from the Rancher UI by clicking **Run Scan.** The display of skipped tests allows you to know ahead of time which tests will be run in each scan. - -To skip tests, you will need to define them in a Kubernetes ConfigMap resource. Each skipped CIS scan test is listed in the ConfigMap alongside the version of the CIS benchmark that the test belongs to. - -To skip tests by editing a ConfigMap resource, - -1. Create a `security-scan` namespace. -1. Create a ConfigMap named `security-scan-cfg`. -1. Enter the skip information under the key `config.json` in the following format: - - ```json - { - "skip": { - "rke-cis-1.4": [ - "1.1.1", - "1.2.2" - ] - } - } - ``` - - In the example above, the CIS benchmark version is specified alongside the tests to be skipped for that version. - -**Result:** These tests will be skipped on subsequent scans that use the defined CIS Benchmark version. - -# Setting Alerts - -Rancher provides a set of alerts for cluster scans, which are not configured to have notifiers by default: - -- A manual cluster scan was completed -- A manual cluster scan has failures -- A scheduled cluster scan was completed -- A scheduled cluster scan has failures - -> **Prerequisite:** You need to configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) before configuring, sending, or receiving alerts. - -To activate an existing alert for a CIS scan result, - -1. From the cluster view in Rancher, click **Tools > Alerts.** -1. Go to the section called **A set of alerts for cluster scans.** -1. Go to the alert you want to activate and click **⋮ > Activate.** -1. Go to the alert rule group **A set of alerts for cluster scans** and click **⋮ > Edit.** -1. Scroll down to the **Alert** section. In the **To** field, select the notifier that you would like to use for sending alert notifications. -1.
Optional: To limit the frequency of the notifications, click on **Show advanced options** and configure the time interval of the alerts. -1. Click **Save.** - -**Result:** The notifications will be triggered when a scan is run on a cluster and the conditions of the active alerts are satisfied. - -To create a new alert, - -1. Go to the cluster view and click **Tools > CIS Scans.** -1. Click **Add Alert.** -1. Fill out the form. -1. Enter a name for the alert. -1. In the **Is** field, set the alert to be triggered when a scan is completed or when a scan has a failure. -1. In the **Send a** field, set the alert as a **Critical,** **Warning,** or **Info** alert level. -1. Choose a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) for the alert. - -**Result:** The alert is created and activated. The notifications will be triggered when a scan is run on a cluster and the conditions of the active alerts are satisfied. - -For more information about alerts, refer to [this page.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) - -# Deleting a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that should be deleted. -1. Click **⋮ > Delete.** -1. Click **Delete.** - -# Downloading a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that you want to download. Click **⋮ > Download.** - -**Result:** The report is downloaded in CSV format. For more information on each column, refer to the [section about the generated report.](#about-the-generated-report) - -# List of Skipped and Not Applicable Tests - -For a list of skipped and not applicable tests, refer to this page. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cis-scans/v2.4/skipped-tests/_index.md b/content/rancher/v2.x/en/cis-scans/v2.4/skipped-tests/_index.md deleted file mode 100644 index d31353304..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.4/skipped-tests/_index.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Skipped and Not Applicable Tests -weight: 1 -aliases: - - /rancher/v2.x/en/cis-scans/legacy/skipped-tests ---- - -This section lists the tests that are skipped in the permissive test profile for RKE. - -All the tests that are skipped and not applicable on this page will be counted as Not Applicable in the v2.5 generated report. The skipped test count will only mention the user-defined skipped tests. This allows user-skipped tests to be distinguished from the tests that are skipped by default in the RKE permissive test profile. - -- [CIS Benchmark v1.5](#cis-benchmark-v1-5) -- [CIS Benchmark v1.4](#cis-benchmark-v1-4) - -# CIS Benchmark v1.5 - -### CIS Benchmark v1.5 Skipped Tests - -| Number | Description | Reason for Skipping | -| ---------- | ------------- | --------- | -| 1.1.12 | Ensure that the etcd data directory ownership is set to etcd:etcd (Scored) | A system service account is required for etcd data directory ownership. Refer to Rancher's hardening guide for more details on how to configure this ownership. | -| 1.2.6 | Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | -| 1.2.16 | Ensure that the admission control plugin PodSecurityPolicy is set (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail.
| -| 1.2.33 | Ensure that the --encryption-provider-config argument is set as appropriate (Not Scored) | Enabling encryption changes how data can be recovered as data is encrypted. | -| 1.2.34 | Ensure that encryption providers are appropriately configured (Not Scored) | Enabling encryption changes how data can be recovered as data is encrypted. | -| 4.2.6 | Ensure that the --protect-kernel-defaults argument is set to true (Scored) | System level configurations are required before provisioning the cluster in order for this argument to be set to true. | -| 4.2.10 | Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | -| 5.1.5 | Ensure that default service accounts are not actively used. (Scored) | Kubernetes provides default service accounts to be used. | -| 5.2.2 | Minimize the admission of containers wishing to share the host process ID namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.3 | Minimize the admission of containers wishing to share the host IPC namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.4 | Minimize the admission of containers wishing to share the host network namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.5 | Minimize the admission of containers with allowPrivilegeEscalation (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.3.2 | Ensure that all Namespaces have Network Policies defined (Scored) | Enabling Network Policies can prevent certain applications from communicating with each other. | -| 5.6.4 | The default namespace should not be used (Scored) | Kubernetes provides a default namespace. | - -### CIS Benchmark v1.5 Not Applicable Tests - -| Number | Description | Reason for being not applicable | -| ---------- | ------------- | --------- | -| 1.1.1 | Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE don't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | -| 1.1.2 | Ensure that the API server pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE don't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | -| 1.1.3 | Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE don't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.4 | Ensure that the controller manager pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE don't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.5 | Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE don't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time.
| -| 1.1.6 | Ensure that the scheduler pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.7 | Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | -| 1.1.8 | Ensure that the etcd pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | -| 1.1.13 | Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | -| 1.1.14 | Ensure that the admin.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | -| 1.1.15 | Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.16 | Ensure that the scheduler.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.17 | Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.18 | Ensure that the controller-manager.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.3.6 | Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) | Clusters provisioned by RKE handles certificate rotation directly through RKE. | -| 4.1.1 | Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | -| 4.1.2 | Ensure that the kubelet service file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | -| 4.1.9 | Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. | -| 4.1.10 | Ensure that the kubelet configuration file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. 
| -| 4.2.12 | Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) | Clusters provisioned by RKE handles certificate rotation directly through RKE. | - -# CIS Benchmark v1.4 - -The skipped and not applicable tests for CIS Benchmark v1.4 are as follows: - -### CIS Benchmark v1.4 Skipped Tests - -Number | Description | Reason for Skipping ----|---|--- -1.1.11 | "Ensure that the admission control plugin AlwaysPullImages is set (Scored)" | Enabling AlwaysPullImages can use significant bandwidth. -1.1.21 | "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored)" | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. -1.1.24 | "Ensure that the admission control plugin PodSecurityPolicy is set (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.1.34 | "Ensure that the --encryption-provider-config argument is set as appropriate (Scored)" | Enabling encryption changes how data can be recovered as data is encrypted. -1.1.35 | "Ensure that the encryption provider is set to aescbc (Scored)" | Enabling encryption changes how data can be recovered as data is encrypted. -1.1.36 | "Ensure that the admission control plugin EventRateLimit is set (Scored)" | EventRateLimit needs to be tuned depending on the cluster. -1.2.2 | "Ensure that the --address argument is set to 127.0.0.1 (Scored)" | Adding this argument prevents Rancher's monitoring tool to collect metrics on the scheduler. -1.3.7 | "Ensure that the --address argument is set to 127.0.0.1 (Scored)" | Adding this argument prevents Rancher's monitoring tool to collect metrics on the controller manager. -1.4.12 | "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" | A system service account is required for etcd data directory ownership. Refer to Rancher's hardening guide for more details on how to configure this ownership. -1.7.2 | "Do not admit containers wishing to share the host process ID namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.3 | "Do not admit containers wishing to share the host IPC namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.4 | "Do not admit containers wishing to share the host network namespace (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -1.7.5 | " Do not admit containers with allowPrivilegeEscalation (Scored)" | Enabling Pod Security Policy can cause applications to unexpectedly fail. -2.1.6 | "Ensure that the --protect-kernel-defaults argument is set to true (Scored)" | System level configurations are required before provisioning the cluster in order for this argument to be set to true. -2.1.10 | "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)" | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. 
- -### CIS Benchmark v1.4 Not Applicable Tests - -Number | Description | Reason for being not applicable ----|---|--- -1.1.9 | "Ensure that the --repair-malformed-updates argument is set to false (Scored)" | The argument --repair-malformed-updates has been removed as of Kubernetes version 1.14 -1.3.6 | "Ensure that the RotateKubeletServerCertificate argument is set to true" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -1.4.1 | "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -1.4.2 | "Ensure that the API server pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -1.4.3 | "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -1.4.4 | "Ensure that the controller manager pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -1.4.5 | "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -1.4.6 | "Ensure that the scheduler pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -1.4.7 | "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -1.4.8 | "Ensure that the etcd pod specification file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -1.4.13 | "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. -1.4.14 | "Ensure that the admin.conf file ownership is set to root:root (Scored)" | Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. -2.1.8 | "Ensure that the --hostname-override argument is not set (Scored)" | Clusters provisioned by RKE clusters and most cloud providers require hostnames. -2.1.12 | "Ensure that the --rotate-certificates argument is not set to false (Scored)" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -2.1.13 | "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)" | Cluster provisioned by RKE handles certificate rotation directly through RKE. -2.2.3 | "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored)" | Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -2.2.4 | "Ensure that the kubelet service file ownership is set to root:root (Scored)" | Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -2.2.9 | "Ensure that the kubelet configuration file ownership is set to root:root (Scored)" | RKE doesn’t require or maintain a configuration file for the kubelet. 
-2.2.10 | "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)" | RKE doesn’t require or maintain a configuration file for the kubelet. diff --git a/content/rancher/v2.x/en/cis-scans/v2.5/_index.md b/content/rancher/v2.x/en/cis-scans/v2.5/_index.md deleted file mode 100644 index cef5119a4..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.5/_index.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -title: CIS Scans in Rancher v2.5 -shortTitle: Rancher v2.5 -weight: 1 ---- - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. - -The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) -- [About the CIS Benchmark](#about-the-cis-benchmark) -- [About the Generated Report](#about-the-generated-report) -- [Test Profiles](#test-profiles) -- [About Skipped and Not Applicable Tests](#about-skipped-and-not-applicable-tests) -- [Roles-based Access Control](./rbac) -- [Configuration](./configuration) -- [How-to Guides](#how-to-guides) - - [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) - - [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) - - [Running a Scan](#running-a-scan) - - [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) - - [Skipping Tests](#skipping-tests) - - [Viewing Reports](#viewing-reports) - - [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) - - [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) - - [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) - -# Changes in Rancher v2.5 - -We now support running CIS scans on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. Previously it was only supported to run CIS scans on RKE Kubernetes clusters. - -In Rancher v2.4, the CIS scan tool was available from the **cluster manager** in the Rancher UI. Now it is available in the **Cluster Explorer** and it can be enabled and deployed using a Helm chart. It can be installed from the Rancher UI, but it can also be installed independently of Rancher. It deploys a CIS scan operator for the cluster, and deploys Kubernetes custom resources for cluster scans. The custom resources can be managed directly from the **Cluster Explorer.** - -In v1 of the CIS scan tool, which was available in Rancher v2.4 through the cluster manager, recurring scans could be scheduled. The ability to schedule recurring scans is now also available for CIS v2 from Rancher v2.5.4. - -Support for alerting for the cluster scan results is now also available from Rancher v2.5.4. - -In Rancher v2.4, permissive and hardened profiles were included. In Rancher v2.5.0 and in v2.5.4, more profiles were included. 
- -{{% tabs %}} -{{% tab "Profiles in v2.5.4" %}} -- Generic CIS 1.5 -- Generic CIS 1.6 -- RKE permissive 1.5 -- RKE hardened 1.5 -- RKE permissive 1.6 -- RKE hardened 1.6 -- EKS -- GKE -- RKE2 permissive 1.5 -- RKE2 permissive 1.5 -{{% /tab %}} -{{% tab "Profiles in v2.5.0-v2.5.3" %}} -- Generic CIS 1.5 -- RKE permissive -- RKE hardened -- EKS -- GKE -{{% /tab %}} -{{% /tabs %}} -
- - -The default profile and the supported CIS benchmark version depends on the type of cluster that will be scanned and the Rancher version: - -{{% tabs %}} -{{% tab "v2.5.4" %}} - -The `rancher-cis-benchmark` supports the CIS 1.6 Benchmark version. - -- For RKE Kubernetes clusters, the RKE Permissive 1.6 profile is the default. -- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. -- For RKE2 Kubernetes clusters, the RKE2 Permissive 1.5 profile is the default. -- For cluster types other than RKE, RKE2, EKS and GKE, the Generic CIS 1.5 profile will be used by default. - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.3" %}} - -The `rancher-cis-benchmark` supports the CIS 1.5 Benchmark version. - -- For RKE Kubernetes clusters, the RKE permissive profile is the default. -- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. -- For cluster types other than RKE, EKS and GKE, the Generic CIS 1.5 profile will be used by default. - -{{% /tab %}} -{{% /tabs %}} - -> **Note:** CIS v1 cannot run on a cluster when CIS v2 is deployed. In other words, after `rancher-cis-benchmark` is installed, you can't run scans by going to the Cluster Manager view in the Rancher UI and clicking Tools > CIS Scans. - -# About the CIS Benchmark - -The Center for Internet Security is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is -here. - -# About the Generated Report - -Each scan generates a report can be viewed in the Rancher UI and can be downloaded in CSV format. - -From Rancher v2.5.4, the scan uses the CIS Benchmark v1.6 by default. In Rancher v2.5.0-2.5.3, the CIS Benchmark v1.5. is used. - -The Benchmark version is included in the generated report. - -The Benchmark provides recommendations of two types: Scored and Not Scored. Recommendations marked as Not Scored in the Benchmark are not included in the generated report. - -Some tests are designated as "Not Applicable." These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated to be not applicable, refer to Rancher's self-assessment guide for the corresponding Kubernetes version. - -The report contains the following information: - -| Column in Report | Description | -|------------------|-------------| -| `id` | The ID number of the CIS Benchmark. | -| `description` | The description of the CIS Benchmark test. | -| `remediation` | What needs to be fixed in order to pass the test. | -| `state` | Indicates if the test passed, failed, was skipped, or was not applicable. 
| -| `node_type` | The node role, which affects which tests are run on the node. Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | -| `audit` | This is the audit check that `kube-bench` runs for this test. | -| `audit_config` | Any configuration applicable to the audit script. | -| `test_info` | Test-related info as reported by `kube-bench`, if any. | -| `commands` | Test-related commands as reported by `kube-bench`, if any. | -| `config_commands` | Test-related configuration data as reported by `kube-bench`, if any. | -| `actual_value` | The test's actual value, present if reported by `kube-bench`. | -| `expected_result` | The test's expected result, present if reported by `kube-bench`. | - -Refer to the table in the cluster hardening guide for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. - -# Test Profiles - -The following profiles are available: - -{{% tabs %}} -{{% tab "Profiles in v2.5.4" %}} -- Generic CIS 1.5 -- Generic CIS 1.6 -- RKE permissive 1.5 -- RKE hardened 1.5 -- RKE permissive 1.6 -- RKE hardened 1.6 -- EKS -- GKE -- RKE2 permissive 1.5 -- RKE2 permissive 1.5 -{{% /tab %}} -{{% tab "Profiles in v2.5.0-v2.5.3" %}} -- Generic CIS 1.5 -- RKE permissive -- RKE hardened -- EKS -- GKE -{{% /tab %}} -{{% /tabs %}} - -You also have the ability to customize a profile by saving a set of tests to skip. - -All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how a RKE cluster manages Kubernetes. - -There are two types of RKE cluster scan profiles: - -- **Permissive:** This profile has a set of tests that have been will be skipped as these tests will fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. -- **Hardened:** This profile will not skip any tests, except for the non-applicable tests. - -The EKS and GKE cluster scan profiles are based on CIS Benchmark versions that are specific to those types of clusters. - -In order to pass the "Hardened" profile, you will need to follow the steps on the hardening guide and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. - -# About Skipped and Not Applicable Tests - -For a list of skipped and not applicable tests, refer to this page. - -For now, only user-defined skipped tests are marked as skipped in the generated report. - -Any skipped tests that are defined as being skipped by one of the default profiles are marked as not applicable. - -# Roles-based Access Control - -For information about permissions, refer to this page. - -# Configuration - -For more information about configuring the custom resources for the scans, profiles, and benchmark versions, refer to this page. 
- -# How-to Guides - -- [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) -- [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) -- [Running a Scan](#running-a-scan) -- [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) -- [Skipping Tests](#skipping-tests) -- [Viewing Reports](#viewing-reports) -- [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) -- [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) -- [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) -### Installing rancher-cis-benchmark - -1. In the Rancher UI, go to the **Cluster Explorer.** -1. Click **Apps.** -1. Click `rancher-cis-benchmark`. -1. Click **Install.** - -**Result:** The CIS scan application is deployed on the Kubernetes cluster. - -### Uninstalling rancher-cis-benchmark - -1. From the **Cluster Explorer,** go to the top left dropdown menu and click **Apps & Marketplace.** -1. Click **Installed Apps.** -1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. -1. Click **Delete** and confirm **Delete.** - -**Result:** The `rancher-cis-benchmark` application is uninstalled. - -### Running a Scan - -When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. - -Note: There is currently a limitation of running only one CIS scan at a time for a cluster. If you create multiple ClusterScan custom resources, they will be run one after the other by the operator, and until one scan finishes, the rest of the ClusterScan custom resources will be in the "Pending" state. - -To run a scan, - -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Click **Create.** - -**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. -### Running a Scan Periodically on a Schedule -_Available as of v2.5.4_ - -To run a ClusterScan on a schedule, - -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Choose the option **Run scan on a schedule.** -1. Enter a valid cron schedule expression in the field **Schedule.** -1. Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. -1. Click **Create.** - -**Result:** The scan runs and reschedules to run according to the cron schedule provided. The **Next Scan** value indicates the next time this scan will run again. 
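
The same scheduled scan can also be expressed declaratively through the ClusterScan custom resource that the operator manages. The sketch below is a minimal, hedged example: the `scheduledScanConfig`, `cronSchedule`, and `retentionCount` field names are assumptions based on the cis-operator's ClusterScan custom resource and may differ between chart versions, and the profile name `rke-profile-hardened` is only illustrative.

```yaml
# Hedged sketch of a scheduled ClusterScan: runs daily at 1 AM and keeps the
# last 3 reports. The field names under scheduledScanConfig are assumptions
# based on the cis-operator's ClusterScan custom resource.
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: rke-cis-scheduled
spec:
  scanProfileName: rke-profile-hardened   # illustrative profile name
  scheduledScanConfig:
    cronSchedule: "0 1 * * *"             # standard cron expression
    retentionCount: 3                     # reports kept before older ones are purged
```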
- -A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. - -You can also see the previous reports by choosing the report from the **Reports** dropdown on the scan detail page. - -### Skipping Tests - -CIS scans can be run using test profiles with user-defined skips. - -To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. - -1. In the **Cluster Explorer,** go to the top-left dropdown menu and click **CIS Benchmark.** -1. Click **Profiles.** -1. From here, you can create a profile in multiple ways. To make a new profile, click **Create** and fill out the form in the UI. To make a new profile based on an existing profile, go to the existing profile, click the three vertical dots, and click **Clone as YAML.** If you are filling out the form, add the tests to skip using the test IDs, using the relevant CIS Benchmark as a reference. If you are creating the new test profile as YAML, you will add the IDs of the tests to skip in the `skipTests` directive. You will also give the profile a name: - - ```yaml - apiVersion: cis.cattle.io/v1 - kind: ClusterScanProfile - metadata: - annotations: - meta.helm.sh/release-name: clusterscan-operator - meta.helm.sh/release-namespace: cis-operator-system - labels: - app.kubernetes.io/managed-by: Helm - name: "" - spec: - benchmarkVersion: cis-1.5 - skipTests: - - "1.1.20" - - "1.1.21" - ``` -1. Click **Create.** - -**Result:** A new CIS scan profile is created. - -When you [run a scan](#running-a-scan) that uses this profile, the defined tests will be skipped during the scan. The skipped tests will be marked in the generated report as `Skip`. - -### Viewing Reports - -To view the generated CIS scan reports, - -1. In the **Cluster Explorer,** go to the top left dropdown menu and click **Cluster Explorer > CIS Benchmark.** -1. The **Scans** page will show the generated reports. To see a detailed report, go to a scan report and click the name. - -One can download the report from the Scans list or from the scan detail page. - -### Enabling Alerting for rancher-cis-benchmark -_Available as of v2.5.4_ - -Alerts can be configured to be sent out for a scan that runs on a schedule. - -> **Prerequisite:** -> -> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/) -> -> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/#example-route-config-for-cis-scan-alerts) - -While installing or upgrading the `rancher-cis-benchmark` application, set the following flag to `true` in the `values.yaml`: - -```yaml -alerts: - enabled: true -``` - -### Configuring Alerts for a Periodic Scan on a Schedule -_Available as of v2.5.4_ - -From Rancher v2.5.4, it is possible to run a ClusterScan on a schedule. - -A scheduled scan can also specify if you should receive alerts when the scan completes. - -Alerts are supported only for a scan that runs on a schedule. 
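
When a scheduled scan is managed directly through its custom resource, the alert choices sit alongside the schedule. This is a hedged sketch: the `scanAlertRule`, `alertOnComplete`, and `alertOnFailure` field names are assumptions based on the cis-operator's ClusterScan custom resource rather than confirmed chart documentation.

```yaml
# Hedged sketch: alert settings attached to a scheduled ClusterScan.
# The scanAlertRule block and its field names are assumptions and may
# differ between chart versions.
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: rke-cis-scheduled-alerts
spec:
  scanProfileName: rke-profile-hardened
  scheduledScanConfig:
    cronSchedule: "0 1 * * *"
    scanAlertRule:
      alertOnComplete: true   # notify when a scan run finishes
      alertOnFailure: true    # notify on test failures or a Fail state
```

Alerts are delivered only if routes and a receiver are configured in the `rancher-monitoring` application, as described below.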
- -The `rancher-cis-benchmark` application supports two types of alerts: - -- Alert on scan completion: This alert is sent out when the scan run finishes. The alert includes details including the ClusterScan's name and the ClusterScanProfile name. -- Alert on scan failure: This alert is sent out if there are some test failures in the scan run or if the scan is in a `Fail` state. - -> **Prerequisite:** -> -> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/) -> -> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/#example-route-config-for-cis-scan-alerts) - -To configure alerts for a scan that runs on a schedule, - -1. Please enable alerts on the `rancher-cis-benchmark` application (#enabling-alerting-for-rancher-cis-benchmark) -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Choose the option **Run scan on a schedule.** -1. Enter a valid [cron schedule expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) in the field **Schedule.** -1. Check the boxes next to the Alert types under **Alerting.** -1. Optional: Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. -1. Click **Create.** - -**Result:** The scan runs and reschedules to run according to the cron schedule provided. Alerts are sent out when the scan finishes if routes and receiver are configured under `rancher-monitoring` application. - -A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. - -### Creating a Custom Benchmark Version for Running a Cluster Scan -_Available as of v2.5.4_ - -There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. - -It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. - -For details, see [this page.](./custom-benchmark) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cis-scans/v2.5/configuration/_index.md b/content/rancher/v2.x/en/cis-scans/v2.5/configuration/_index.md deleted file mode 100644 index ccc6df3f4..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.5/configuration/_index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: Configuration -weight: 3 -aliases: - - /rancher/v2.x/en/cis-scans/configuration ---- - -This configuration reference is intended to help you manage the custom resources created by the `rancher-cis-benchmark` application. 
These resources are used for performing CIS scans on a cluster, skipping tests, setting the test profile that will be used during a scan, and other customization. - -To configure the custom resources, go to the **Cluster Explorer** in the Rancher UI. In dropdown menu in the top left corner, click **Cluster Explorer > CIS Benchmark.** - -### Scans - -A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. - -When configuring a scan, you need to define the name of the scan profile that will be used with the `scanProfileName` directive. - -An example ClusterScan custom resource is below: - -```yaml -apiVersion: cis.cattle.io/v1 -kind: ClusterScan -metadata: - name: rke-cis -spec: - scanProfileName: rke-profile-hardened -``` - -### Profiles - -A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. - -> By default, a few ClusterScanProfiles are installed as part of the `rancher-cis-benchmark` chart. If a user edits these default benchmarks or profiles, the next chart update will reset them back. So it is advisable for users to not edit the default ClusterScanProfiles. - -Users can clone the ClusterScanProfiles to create custom profiles. - -Skipped tests are listed under the `skipTests` directive. - -When you create a new profile, you will also need to give it a name. - -An example `ClusterScanProfile` is below: - -```yaml -apiVersion: cis.cattle.io/v1 -kind: ClusterScanProfile -metadata: - annotations: - meta.helm.sh/release-name: clusterscan-operator - meta.helm.sh/release-namespace: cis-operator-system - labels: - app.kubernetes.io/managed-by: Helm - name: "" -spec: - benchmarkVersion: cis-1.5 - skipTests: - - "1.1.20" - - "1.1.21" -``` - -### Benchmark Versions - -A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. - -A `ClusterScanBenchmark` defines the CIS `BenchmarkVersion` name and test configurations. The `BenchmarkVersion` name is a parameter provided to the `kube-bench` tool. - -By default, a few `BenchmarkVersion` names and test configurations are packaged as part of the CIS scan application. When this feature is enabled, these default BenchmarkVersions will be automatically installed and available for users to create a ClusterScanProfile. - -> If the default BenchmarkVersions are edited, the next chart update will reset them back. Therefore we don't recommend editing the default ClusterScanBenchmarks. - -A ClusterScanBenchmark consists of the fields: - -- `ClusterProvider`: This is the cluster provider name for which this benchmark is applicable. For example: RKE, EKS, GKE, etc. Leave it empty if this benchmark can be run on any cluster type. -- `MinKubernetesVersion`: Specifies the cluster's minimum kubernetes version necessary to run this benchmark. Leave it empty if there is no dependency on a particular Kubernetes version. -- `MaxKubernetesVersion`: Specifies the cluster's maximum Kubernetes version necessary to run this benchmark. Leave it empty if there is no dependency on a particular k8s version. 
- -An example `ClusterScanBenchmark` is below: - -```yaml -apiVersion: cis.cattle.io/v1 -kind: ClusterScanBenchmark -metadata: - annotations: - meta.helm.sh/release-name: clusterscan-operator - meta.helm.sh/release-namespace: cis-operator-system - creationTimestamp: "2020-08-28T18:18:07Z" - generation: 1 - labels: - app.kubernetes.io/managed-by: Helm - name: cis-1.5 - resourceVersion: "203878" - selfLink: /apis/cis.cattle.io/v1/clusterscanbenchmarks/cis-1.5 - uid: 309e543e-9102-4091-be91-08d7af7fb7a7 -spec: - clusterProvider: "" - minKubernetesVersion: 1.15.0 -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cis-scans/v2.5/custom-benchmark/_index.md b/content/rancher/v2.x/en/cis-scans/v2.5/custom-benchmark/_index.md deleted file mode 100644 index f55e25478..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.5/custom-benchmark/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Creating a Custom Benchmark Version for Running a Cluster Scan -weight: 4 ---- - -_Available as of v2.5.4_ - -Each Benchmark Version defines a set of test configuration files that define the CIS tests to be run by the kube-bench tool. -The `rancher-cis-benchmark` application installs a few default Benchmark Versions which are listed under CIS Benchmark application menu. - -But there could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. - -It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. - -When a cluster scan is run, you need to select a Profile which points to a specific Benchmark Version. - -Follow all the steps below to add a custom Benchmark Version and run a scan using it. - -1. [Prepare the Custom Benchmark Version ConfigMap](#1-prepare-the-custom-benchmark-version-configmap) -2. [Add a Custom Benchmark Version to a Cluster](#2-add-a-custom-benchmark-version-to-a-cluster) -3. [Create a New Profile for the Custom Benchmark Version](#3-create-a-new-profile-for-the-custom-benchmark-version) -4. [Run a Scan Using the Custom Benchmark Version](#4-run-a-scan-using-the-custom-benchmark-version) - -### 1. Prepare the Custom Benchmark Version ConfigMap - -To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. - -To prepare a custom benchmark version ConfigMap, suppose we want to add a custom Benchmark Version named `foo`. - -1. Create a directory named `foo` and inside this directory, place all the config YAML files that the kube-bench tool looks for. For example, here are the config YAML files for a Generic CIS 1.5 Benchmark Version https://github.com/aquasecurity/kube-bench/tree/master/cfg/cis-1.5 -1. Place the complete `config.yaml` file, which includes all the components that should be tested. -1. Add the Benchmark version name to the `target_mapping` section of the `config.yaml`: - - ```yaml - target_mapping: - "foo": - - "master" - - "node" - - "controlplane" - - "etcd" - - "policies" - ``` -1. Upload this directory to your Kubernetes Cluster by creating a ConfigMap: - - ```yaml - kubectl create configmap -n foo --from-file= - ``` - -### 2. Add a Custom Benchmark Version to a Cluster - -1. 
Once the ConfigMap has been created in your cluster, navigate to the **Cluster Explorer** in the Rancher UI. -1. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Benchmark Versions** section, click **Create.** -1. Enter the **Name** and a description for your custom benchmark version. -1. Choose the cluster provider that your benchmark version applies to. -1. Choose the ConfigMap you have uploaded from the dropdown. -1. Add the minimum and maximum Kubernetes version limits applicable, if any. -1. Click **Create.** - -### 3. Create a New Profile for the Custom Benchmark Version - -To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. - -1. Once the custom benchmark version has been created in your cluster, navigate to the **Cluster Explorer** in the Rancher UI. -1. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Profiles** section, click **Create.** -1. Provide a **Name** and description. In this example, we name it `foo-profile`. -1. Choose the Benchmark Version `foo` from the dropdown. -1. Click **Create.** - -### 4. Run a Scan Using the Custom Benchmark Version - -Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. - -To run a scan, - -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose the new cluster scan profile `foo-profile`. -1. Click **Create.** - -**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cis-scans/v2.5/rbac/_index.md b/content/rancher/v2.x/en/cis-scans/v2.5/rbac/_index.md deleted file mode 100644 index a3424ba56..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.5/rbac/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Roles-based Access Control -shortTitle: RBAC -weight: 3 -aliases: - - /rancher/v2.x/en/cis-scans/rbac ---- - -This section describes the permissions required to use the rancher-cis-benchmark App. - -The rancher-cis-benchmark is a cluster-admin only feature by default. - -However, the `rancher-cis-benchmark` chart installs these two default `ClusterRoles`: - -- cis-admin -- cis-view - -In Rancher, only cluster owners and global administrators have `cis-admin` access by default. - -Note: If you were using the `cis-edit` role added in Rancher v2.5 setup, it has now been removed since -Rancher v2.5.2 because it essentially is same as `cis-admin`. If you happen to create any clusterrolebindings -for `cis-edit`, please update them to use `cis-admin` ClusterRole instead. - -# Cluster-Admin Access - -Rancher CIS Scans is a cluster-admin only feature by default. 
-This means only the Rancher global admins, and the cluster’s cluster-owner can: - -- Install/Uninstall the rancher-cis-benchmark App -- See the navigation links for CIS Benchmark CRDs - ClusterScanBenchmarks, ClusterScanProfiles, ClusterScans -- List the default ClusterScanBenchmarks and ClusterScanProfiles -- Create/Edit/Delete new ClusterScanProfiles -- Create/Edit/Delete a new ClusterScan to run the CIS scan on the cluster -- View and Download the ClusterScanReport created after the ClusterScan is complete - - -# Summary of Default Permissions for Kubernetes Default Roles - -The rancher-cis-benchmark creates three `ClusterRoles` and adds the CIS Benchmark CRD access to the following default K8s `ClusterRoles`: - -| ClusterRole created by chart | Default K8s ClusterRole | Permissions given with Role -| ------------------------------| ---------------------------| ---------------------------| -| `cis-admin` | `admin`| Ability to CRUD clusterscanbenchmarks, clusterscanprofiles, clusterscans, clusterscanreports CR -| `cis-view` | `view `| Ability to List(R) clusterscanbenchmarks, clusterscanprofiles, clusterscans, clusterscanreports CR - - -By default only cluster-owner role will have ability to manage and use `rancher-cis-benchmark` feature. - -The other Rancher roles (cluster-member, project-owner, project-member) do not have any default permissions to manage and use rancher-cis-benchmark resources. - -But if a cluster-owner wants to delegate access to other users, they can do so by creating ClusterRoleBindings between these users and the above CIS ClusterRoles manually. -There is no automatic role aggregation supported for the `rancher-cis-benchmark` ClusterRoles. diff --git a/content/rancher/v2.x/en/cis-scans/v2.5/skipped-tests/_index.md b/content/rancher/v2.x/en/cis-scans/v2.5/skipped-tests/_index.md deleted file mode 100644 index 2fb1461e9..000000000 --- a/content/rancher/v2.x/en/cis-scans/v2.5/skipped-tests/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Skipped and Not Applicable Tests -weight: 3 -aliases: - - /rancher/v2.x/en/cis-scans/skipped-tests ---- - -This section lists the tests that are skipped in the permissive test profile for RKE. - -> All the tests that are skipped and not applicable on this page will be counted as Not Applicable in the v2.5 generated report. The skipped test count will only mention the user-defined skipped tests. This allows user-skipped tests to be distinguished from the tests that are skipped by default in the RKE permissive test profile. - -# CIS Benchmark v1.5 - -### CIS Benchmark v1.5 Skipped Tests - -| Number | Description | Reason for Skipping | -| ---------- | ------------- | --------- | -| 1.1.12 | Ensure that the etcd data directory ownership is set to etcd:etcd (Scored) | A system service account is required for etcd data directory ownership. Refer to Rancher's hardening guide for more details on how to configure this ownership. | -| 1.2.6 | Ensure that the --kubelet-certificate-authority argument is set as appropriate (Scored) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | -| 1.2.16 | Ensure that the admission control plugin PodSecurityPolicy is set (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 1.2.33 | Ensure that the --encryption-provider-config argument is set as appropriate (Not Scored) | Enabling encryption changes how data can be recovered as data is encrypted. 
| -| 1.2.34 | Ensure that encryption providers are appropriately configured (Not Scored) | Enabling encryption changes how data can be recovered as data is encrypted. | -| 4.2.6 | Ensure that the --protect-kernel-defaults argument is set to true (Scored) | System level configurations are required before provisioning the cluster in order for this argument to be set to true. | -| 4.2.10 | Ensure that the--tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | -| 5.1.5 | Ensure that default service accounts are not actively used. (Scored) | Kubernetes provides default service accounts to be used. | -| 5.2.2 | Minimize the admission of containers wishing to share the host process ID namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.3 | Minimize the admission of containers wishing to share the host IPC namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.4 | Minimize the admission of containers wishing to share the host network namespace (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.2.5 | Minimize the admission of containers with allowPrivilegeEscalation (Scored) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | -| 5.3.2 | Ensure that all Namespaces have Network Policies defined (Scored) | Enabling Network Policies can prevent certain applications from communicating with each other. | -| 5.6.4 | The default namespace should not be used (Scored) | Kubernetes provides a default namespace. | - -### CIS Benchmark v1.5 Not Applicable Tests - -| Number | Description | Reason for being not applicable | -| ---------- | ------------- | --------- | -| 1.1.1 | Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | -| 1.1.2 | Ensure that the API server pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | -| 1.1.3 | Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.4 | Ensure that the controller manager pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.5 | Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.6 | Ensure that the scheduler pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. 
All configuration is passed in as arguments at container run time. | -| 1.1.7 | Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | -| 1.1.8 | Ensure that the etcd pod specification file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | -| 1.1.13 | Ensure that the admin.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | -| 1.1.14 | Ensure that the admin.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | -| 1.1.15 | Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.16 | Ensure that the scheduler.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | -| 1.1.17 | Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.1.18 | Ensure that the controller-manager.conf file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | -| 1.3.6 | Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) | Clusters provisioned by RKE handles certificate rotation directly through RKE. | -| 4.1.1 | Ensure that the kubelet service file permissions are set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | -| 4.1.2 | Ensure that the kubelet service file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | -| 4.1.9 | Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. | -| 4.1.10 | Ensure that the kubelet configuration file ownership is set to root:root (Scored) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. | -| 4.2.12 | Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) | Clusters provisioned by RKE handles certificate rotation directly through RKE. 
| \ No newline at end of file diff --git a/content/rancher/v2.x/en/cli/_index.md b/content/rancher/v2.x/en/cli/_index.md deleted file mode 100644 index 60efccf16..000000000 --- a/content/rancher/v2.x/en/cli/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Using the Rancher Command Line Interface -description: The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI -metaTitle: "Using the Rancher Command Line Interface " -metaDescription: "The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI" -weight: 21 -aliases: - - /rancher/v2.x/en/cluster-admin/cluster-access/cli ---- - -The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. - -### Download Rancher CLI - -The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -### Requirements - -After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - -- Your Your Rancher Server URL, which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.x/en/user-settings/api-keys/). - -### CLI Authentication - -Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): - -```bash -$ ./rancher login https:// --token -``` - -If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. - -### Project Selection - -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. - -**Example: `./rancher context switch` Output** -``` -User:rancher-cli-directory user$ ./rancher context switch -NUMBER CLUSTER NAME PROJECT ID PROJECT NAME -1 cluster-2 c-7q96s:p-h4tmb project-2 -2 cluster-2 c-7q96s:project-j6z6d Default -3 cluster-1 c-lchzv:p-xbpdt project-1 -4 cluster-1 c-lchzv:project-s2mch Default -Select a Project: -``` - -After you enter a number, the console displays a message that you've changed projects. - -``` -INFO[0005] Setting new context to project project-1 -INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json -``` - -### Commands - -The following commands are available for use in Rancher CLI. - -| Command | Result | -|---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or [Rancher charts]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/#chart-directory-structure). | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.x/en/catalog/). 
| -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | -| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | -| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | -| `namespaces, [namespace]` |Performs operations on [namespaces]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). | -| `nodes, [node]` |Performs operations on [nodes]({{}}/rancher/v2.x/en/overview/architecture/#kubernetes). | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). | -| `ps` | Displays [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads) in a project. | -| `settings, [setting]` | Shows the current settings for your Rancher Server. | -| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | -| `help, [h]` | Shows a list of commands or help for one command. | - - -### Rancher CLI Help - -Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. - -All commands accept the `--help` flag, which documents each command's usage. diff --git a/content/rancher/v2.x/en/cluster-admin/_index.md b/content/rancher/v2.x/en/cluster-admin/_index.md deleted file mode 100644 index 022d90a8d..000000000 --- a/content/rancher/v2.x/en/cluster-admin/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Cluster Administration -weight: 8 ---- - -After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. - -This page covers the following topics: - -- [Switching between clusters](#switching-between-clusters) -- [Managing clusters in Rancher](#managing-clusters-in-rancher) -- [Configuring tools](#configuring-tools) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. - -## Switching between Clusters - -To switch between clusters, use the drop-down available in the navigation bar. - -Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. - -## Managing Clusters in Rancher - -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.x/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. 
- -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Configuring Tools - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories: - -- Alerts -- Notifiers -- Logging -- Monitoring -- Istio Service Mesh -- OPA Gatekeeper - -For more information, see [Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) diff --git a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md deleted file mode 100644 index df3b68815..000000000 --- a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md +++ /dev/null @@ -1,220 +0,0 @@ ---- -title: Backing up a Cluster -weight: 2045 --- - -_Available as of v2.2.0_ - -In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can easily be performed. - -Rancher recommends configuring recurring `etcd` snapshots for all production clusters. One-time snapshots can easily be taken as well. - -Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to an [S3 compatible target](#s3-backup-target). The advantage of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -This section covers the following topics: - -- [How snapshots work](#how-snapshots-work) -- [Configuring recurring snapshots](#configuring-recurring-snapshots) -- [One-time snapshots](#one-time-snapshots) -- [Snapshot backup targets](#snapshot-backup-targets) - - [Local backup target](#local-backup-target) - - [S3 backup target](#s3-backup-target) - - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) - - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) -- [Viewing available snapshots](#viewing-available-snapshots) -- [Safe timestamps](#safe-timestamps) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -# How Snapshots Work - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} - -### Snapshot Components - -When Rancher creates a snapshot, it includes three components: - -- The cluster data in etcd -- The Kubernetes version -- The cluster configuration in the form of the `cluster.yml` - -Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. - -The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring from snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes version, and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -It's always recommended to take a new snapshot before any upgrades. - -### Generating the Snapshot from etcd Nodes - -For each etcd node in the cluster, the etcd cluster health is checked. 
If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -### Snapshot Naming Conventions - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: - -- `m` stands for manual -- `r` stands for recurring -- `l` stands for local -- `s` stands for S3 - -Some example snapshot names are: - -- c-9dmxz-rl-8b2cx -- c-9dmxz-ml-kr56m -- c-9dmxz-ms-t6bjb -- c-9dmxz-rs-8gxc8 - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. The snapshot is retrieved from S3, if S3 is configured. -2. The snapshot is unzipped (if zipped). -3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -4. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -5. The cluster is restored and post-restore actions will be done in the cluster. - -{{% /tab %}} -{{% tab "Rancher before v2.4.0" %}} -When Rancher creates a snapshot, only the etcd data is included in the snapshot. - -Because the Kubernetes version is not included in the snapshot, there is no option to restore a cluster to a different Kubernetes version. - -It's always recommended to take a new snapshot before any upgrades. - -### Generating the Snapshot from etcd Nodes - -For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -### Snapshot Naming Conventions - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: - -- `m` stands for manual -- `r` stands for recurring -- `l` stands for local -- `s` stands for S3 - -Some example snapshot names are: - -- c-9dmxz-rl-8b2cx -- c-9dmxz-ml-kr56m -- c-9dmxz-ms-t6bjb -- c-9dmxz-rs-8gxc8 - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. 
The snapshot is retrieved from S3, if S3 is configured. -2. The snapshot is unzipped (if zipped). -3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -4. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -5. The cluster is restored and post-restore actions will be done in the cluster. - -{{% /tab %}} -{{% /tabs %}} - -# Configuring Recurring Snapshots - -Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. - -By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. - -During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. - -In the **Advanced Cluster Options** section, there are several options available to configure: - -| Option | Description | Default Value| -| --- | ---| --- | -| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. Options are either local or in S3 | local| -|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| -| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | -| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | - -# One-Time Snapshots - -In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to back up the state of the cluster to protect against upgrade failure. - -1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot of. - -2. Click the **⋮ > Snapshot Now**. - -**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. - -# Snapshot Backup Targets - -Rancher supports two different backup targets: - -* [Local Target](#local-backup-target) -* [S3 Target](#s3-backup-target) - -### Local Backup Target - -By default, the `local` backup target is selected. The benefit of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. - -### S3 Backup Target - -The `S3` backup target allows users to configure an S3-compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends an external target such as `S3`; however, its configuration requires additional effort that should be considered. 
- -| Option | Description | Required| -|---|---|---| -|S3 Bucket Name| S3 bucket name where backups will be stored| *| -|S3 Region|S3 region for the backup bucket| | -|S3 Region Endpoint|S3 region endpoint for the backup bucket|* | -|S3 Access Key|S3 access key with permission to access the backup bucket|*| -|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| -| Custom CA Certificate | A custom certificate used to access private S3 backends _Available as of v2.2.5_ || - -### Using a custom CA certificate for S3 - -_Available as of v2.2.5_ - -The backup snapshot can be stored on a custom `S3` backend such as [MinIO](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. - -### IAM Support for Storing Snapshots in S3 - -The `S3` backup target supports IAM authentication to the AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: - - - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. - - The cluster etcd nodes must have network access to the specified S3 endpoint. - - The Rancher Server worker node(s) must have an instance role that has read/write access to the designated backup bucket. - - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. - - To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -# Viewing Available Snapshots - -A list of all available snapshots for the cluster can be viewed in the Rancher UI. - -1. In the **Global** view, navigate to the cluster whose snapshots you want to view. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -# Safe Timestamps - -_Available as of v2.3.0_ - -As of v2.2.6, snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. As of Rancher v2.3.0, the option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. - -This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). 
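Outside of the UI, the snapshot targets described above can also be spot-checked directly. A minimal sketch, assuming SSH access to an etcd node and, for the S3 target, the AWS CLI configured with credentials for the bucket; `my-etcd-backups` is only a placeholder bucket name:

```bash
# On an etcd node: recurring and one-time snapshots are written to this
# directory when the local backup target is used.
ls -lh /opt/rke/etcd-snapshots

# For the S3 backup target: confirm that snapshots are arriving in the bucket.
aws s3 ls s3://my-etcd-backups/
```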
diff --git a/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md b/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md deleted file mode 100644 index 357ab776e..000000000 --- a/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Certificate Rotation -weight: 2040 ---- - -> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. - -By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. - -Certificates can be rotated for the following services: - -- etcd -- kubelet -- kube-apiserver -- kube-proxy -- kube-scheduler -- kube-controller-manager - - -### Certificate Rotation in Rancher v2.2.x - -_Available as of v2.2.0_ - -Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. - -1. In the **Global** view, navigate to the cluster that you want to rotate certificates. - -2. Select the **⋮ > Rotate Certificates**. - -3. Select which certificates that you want to rotate. - - * Rotate all Service certificates (keep the same CA) - * Rotate an individual service and choose one of the services from the drop down menu - -4. Click **Save**. - -**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. - -> **Note:** Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher Launched Kubernetes clusters. - - -### Certificate Rotation in Rancher v2.1.x and v2.0.x - -_Available as of v2.0.14 and v2.1.9_ - -Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the API. - -1. In the **Global** view, navigate to the cluster that you want to rotate certificates. - -2. Select the **⋮ > View in API**. - -3. Click on **RotateCertificates**. - -4. Click on **Show Request**. - -5. Click on **Send Request**. - -**Results:** All Kubernetes certificates will be rotated. - -### Rotating Expired Certificates After Upgrading Older Rancher Versions - -If you are upgrading from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your clusters have expired certificates, some manual steps are required to complete the certificate rotation. - -1. For the `controlplane` and `etcd` nodes, log in to each corresponding host and check if the certificate `kube-apiserver-requestheader-ca.pem` is in the following directory: - - ``` - cd /etc/kubernetes/.tmp - ``` - - If the certificate is not in the directory, perform the following commands: - - ``` - cp kube-ca.pem kube-apiserver-requestheader-ca.pem - cp kube-ca-key.pem kube-apiserver-requestheader-ca-key.pem - cp kube-apiserver.pem kube-apiserver-proxy-client.pem - cp kube-apiserver-key.pem kube-apiserver-proxy-client-key.pem - ``` - - If the `.tmp` directory does not exist, you can copy the entire SSL certificate to `.tmp`: - - ``` - cp -r /etc/kubernetes/ssl /etc/kubernetes/.tmp - ``` - -1. Rotate the certificates. 
For Rancher v2.0.x and v2.1.x, use the [Rancher API.](#certificate-rotation-in-rancher-v2-1-x-and-v2-0-x) For Rancher 2.2.x, [use the UI.](#certificate-rotation-in-rancher-v2-2-x) - -1. After the command is finished, check if the `worker` nodes are Active. If not, log in to each `worker` node and restart the kubelet and proxy. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md deleted file mode 100644 index 4cd99d798..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Removing Kubernetes Components from Nodes -description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually -weight: 2055 ---- - -This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. - -When you use Rancher to [launch nodes for a cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher), resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. - -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. - -## What Gets Removed? - -When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. - -| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Imported Nodes][4] | -| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | -| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | -| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | -| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | -| Rancher Deployment | ✓ | ✓ | ✓ | | -| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | -| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | -| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | - -[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ - -## Removing a Node from a Cluster by Rancher UI - -When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. 
- -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -## Removing Rancher Components from a Cluster Manually - -When an unreachable node is removed from the cluster, the automatic cleanup process can't be triggered because the node can't be reached. Please follow the steps below to manually remove the Rancher components. - ->**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. - -### Removing Rancher Components from Imported Clusters - -For imported clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or you can run a script that removes Rancher components from the nodes. Both options make the same deletions. - -After the imported cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. - -{{% tabs %}} -{{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. - -After you initiate the removal of an [imported cluster]({{}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) using the Rancher UI (or API), the following events occur. - -1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. - -1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. - -1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. - -**Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% tab "By Script" %}} -Rather than cleaning imported cluster nodes using the Rancher UI, you can run a script instead. This functionality has been available since `v2.1.0`. - ->**Prerequisite:** -> ->Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. - -1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: - - ``` - chmod +x user-cluster.sh - ``` - -1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. - - If you don't have an air gap environment, skip this step. - -1. From the same directory, run the script and provide the `rancher/rancher-agent` image version, which should be equal to the version of Rancher used to manage the cluster. 
(``): - - >**Tip:** - > - >Add the `-dry-run` flag to preview the script's outcome without making changes. - ``` - ./user-cluster.sh rancher/rancher-agent: - ``` - -**Result:** The script runs. All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% /tabs %}} - -### Windows Nodes - -To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. - -To run the script, you can use this command in the PowerShell: - -``` -pushd c:\etc\rancher -.\cleanup.ps1 -popd -``` - -**Result:** The node is reset and can be re-added to a Kubernetes cluster. - -### Docker Containers, Images, and Volumes - -Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) - -**To clean all Docker containers, images and volumes:** - -``` -docker rm -f $(docker ps -qa) -docker rmi -f $(docker images -q) -docker volume rm $(docker volume ls -q) -``` - -### Mounts - -Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. - -Mounts | ---------| -`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | -`/var/lib/kubelet` | -`/var/lib/rancher` | - -**To unmount all mounts:** - -``` -for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done -``` - -### Directories and Files - -The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. - ->**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. - -Directories | ---------| -`/etc/ceph` | -`/etc/cni` | -`/etc/kubernetes` | -`/opt/cni` | -`/opt/rke` | -`/run/secrets/kubernetes.io` | -`/run/calico` | -`/run/flannel` | -`/var/lib/calico` | -`/var/lib/etcd` | -`/var/lib/cni` | -`/var/lib/kubelet` | -`/var/lib/rancher/rke/log` | -`/var/log/containers` | -`/var/log/kube-audit` | -`/var/log/pods` | -`/var/run/calico` | - -**To clean the directories:** - -``` -rm -rf /etc/ceph \ - /etc/cni \ - /etc/kubernetes \ - /opt/cni \ - /opt/rke \ - /run/secrets/kubernetes.io \ - /run/calico \ - /run/flannel \ - /var/lib/calico \ - /var/lib/etcd \ - /var/lib/cni \ - /var/lib/kubelet \ - /var/lib/rancher/rke/log \ - /var/log/containers \ - /var/log/kube-audit \ - /var/log/pods \ - /var/run/calico -``` - -### Network Interfaces and Iptables - -The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. - -### Network Interfaces - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. 
- -Interfaces | ---------| -`flannel.1` | -`cni0` | -`tunl0` | -`caliXXXXXXXXXXX` (random interface names) | -`vethXXXXXXXX` (random interface names) | - -**To list all interfaces:** - -``` -# Using ip -ip address show - -# Using ifconfig -ifconfig -a -``` - -**To remove an interface:** - -``` -ip link delete interface_name -``` - -### Iptables - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. - -Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. - -Chains | ---------| -`cali-failsafe-in` | -`cali-failsafe-out` | -`cali-fip-dnat` | -`cali-fip-snat` | -`cali-from-hep-forward` | -`cali-from-host-endpoint` | -`cali-from-wl-dispatch` | -`cali-fw-caliXXXXXXXXXXX` (random chain names) | -`cali-nat-outgoing` | -`cali-pri-kns.NAMESPACE` (chain per namespace) | -`cali-pro-kns.NAMESPACE` (chain per namespace) | -`cali-to-hep-forward` | -`cali-to-host-endpoint` | -`cali-to-wl-dispatch` | -`cali-tw-caliXXXXXXXXXXX` (random chain names) | -`cali-wl-to-host` | -`KUBE-EXTERNAL-SERVICES` | -`KUBE-FIREWALL` | -`KUBE-MARK-DROP` | -`KUBE-MARK-MASQ` | -`KUBE-NODEPORTS` | -`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | -`KUBE-SERVICES` | -`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | - -**To list all iptables rules:** - -``` -iptables -L -t nat -iptables -L -t mangle -iptables -L -``` diff --git a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md deleted file mode 100644 index 10962edea..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Cloning Clusters -weight: 2035 -aliases: - - /rancher/v2.x/en/cluster-provisioning/cloning-clusters/ ---- - -If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. - -Duplication of imported clusters is not supported. - -| Cluster Type | Cloneable? | -|----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | ✓ | -| [Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) | ✓ | -| [Imported Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) | | - -> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. - -## Prerequisites - -Download and install [Rancher CLI]({{}}/rancher/v2.x/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.x/en/user-settings/api-keys) if necessary. - - -## 1. Export Cluster Config - -Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. - -1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. - -1. 
Enter the following command to list the clusters managed by Rancher. - - - ./rancher cluster ls - - -1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. - -1. Enter the following command to export the configuration for your cluster. - - - ./rancher clusters export - - - **Step Result:** The YAML for a cloned cluster prints to Terminal. - -1. Copy the YAML to your clipboard and paste it in a new file. Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). - -## 2. Modify Cluster Config - -Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. - -> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. - - >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. - - -1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. - - ```yml - Version: v3 - clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - rancherKubernetesEngineConfig: - addonJobTimeout: 30 - authentication: - strategy: x509 - authorization: {} - bastionHost: {} - cloudProvider: {} - ignoreDockerVersion: true - ``` - -1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. - - ```yml - nodePools: - : - clusterId: do - controlPlane: true - etcd: true - hostnamePrefix: mark-do - nodeTemplateId: do - quantity: 1 - worker: true - ``` - -1. When you're done, save and close the configuration. - -## 3. Launch Cloned Cluster - -Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: - - ./rancher up --file cluster-template.yml - -**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md deleted file mode 100644 index 418726bd9..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Cluster Access -weight: 1 ---- - -This section is about what tools can be used to access clusters managed by Rancher. 
- -For information on how to give users permission to access a cluster, see the section on [adding users to clusters.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) - -For more information on roles-based access control, see [this section.]({{}}/rancher/v2.x/en/admin-settings/rbac/) - -For information on how to set up an authentication system, see [this section.]({{}}/rancher/v2.x/en/admin-settings/authentication/) - - -### Rancher UI - -Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. - -### kubectl - -You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - -- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/). -- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](./kubectl/). - -### Rancher CLI - -You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - -### Rancher API - -Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md deleted file mode 100644 index c3bdd5bf1..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: How the Authorized Cluster Endpoint Works -weight: 2015 ---- - -This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -### About the kubeconfig File - -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). - -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. 
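One common way to keep those per-cluster files apart is to save each one under its own name and point kubectl at the right file explicitly. A minimal sketch; the file names and paths below are only placeholders:

```bash
# Save the kubeconfig downloaded from the Rancher UI under a per-cluster name
mkdir -p ~/.kube
mv ~/Downloads/my-cluster.yaml ~/.kube/my-cluster.yaml

# Use it for a single command...
kubectl --kubeconfig ~/.kube/my-cluster.yaml get nodes

# ...or export it for the rest of the shell session
export KUBECONFIG=~/.kube/my-cluster.yaml
kubectl get nodes
```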
- -After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. - -_Available as of v2.4.6_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.x/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](../cli) to be present in your PATH. - - -### Two Authentication Methods for RKE Clusters - -If the cluster is not an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. - -For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: - -- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. -- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. - -This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. - -To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.x/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. - -### About the kube-api-auth Authentication Webhook - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. 
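As an illustration of what that authentication call looks like, the endpoint is expected to follow the standard Kubernetes TokenReview webhook contract, so the API server effectively sends a request along these lines. This is only a sketch run from a controlplane node: the token value is a placeholder, the API version shown may differ from what your cluster uses, and the exact response fields are determined by `kube-api-auth` itself.

```bash
# Post a TokenReview to the kube-api-auth endpoint, the same way kube-apiserver does
curl -s http://127.0.0.1:6440/v1/authenticate \
  -H 'Content-Type: application/json' \
  -d '{
        "apiVersion": "authentication.k8s.io/v1",
        "kind": "TokenReview",
        "spec": { "token": "kubeconfig-user-abc123:placeholder-token-value" }
      }'
```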
- -The scheduling rules for `kube-api-auth` are listed below: - -_Applies to v2.3.0 and higher_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md deleted file mode 100644 index 0edd67b07..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Adding Users to Clusters -weight: 2020 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-managing-cluster-members/ - - /rancher/v2.x/en/k8s-in-rancher/cluster-members/ - - /rancher/v2.x/en/cluster-admin/cluster-members ---- - -If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. - ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. - -There are two contexts where you can add cluster members: - -- Adding Members to a New Cluster - - You can add members to a cluster as you create it (recommended if possible). - -- [Adding Members to an Existing Cluster](#editing-cluster-membership) - - You can always add members to a cluster after a cluster is provisioned. - -## Editing Cluster Membership - -Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. - -1. From the **Global** view, open the cluster that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the cluster. - - If external authentication is configured: - - - Rancher returns users from your [external authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. - - >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/). - - - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -4. Assign the user or group **Cluster** roles. - - [What are Cluster Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). - -**Result:** The chosen users are added to the cluster. - -- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. 
diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md deleted file mode 100644 index f8d6817e6..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Access a Cluster with Kubectl and kubeconfig" -description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." -weight: 2010 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/kubectl/ - - /rancher/v2.x/en/cluster-admin/kubectl - - /rancher/v2.x/en/concepts/clusters/kubeconfig-files/ - - /rancher/v2.x/en/k8s-in-rancher/kubeconfig/ - - /rancher/2.x/en/cluster-admin/kubeconfig ---- - -This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. - -For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). - -- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) -- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) -- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) -- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) - - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) - - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) - - -### Accessing Clusters with kubectl Shell in the Rancher UI - -You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. - -1. From the **Global** view, open the cluster that you want to access with kubectl. - -2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. - -### Accessing Clusters with kubectl from Your Workstation - -This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. - -This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. - -> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. -1. Click **Kubeconfig File**. -1. Copy the contents displayed to your clipboard. -1. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: - ``` - kubectl --kubeconfig /custom/path/kube.config get pods - ``` -1. From your workstation, launch kubectl. 
Use it to interact with your Kubernetes cluster. - - -### Note on Resources Created Using kubectl - -Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. - -# Authenticating Directly with a Downstream Cluster - -This section is intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) - -This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) - -As a best practice, we recommend setting up this method to access your RKE cluster, so that you can still access the cluster even if you can't connect to Rancher. - -> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) - -To find the name of the context(s) in your downloaded kubeconfig file, run: - -``` -kubectl config get-contexts --kubeconfig /custom/path/kube.config -CURRENT NAME CLUSTER AUTHINFO NAMESPACE -* my-cluster my-cluster user-46tmn - my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn -``` - -In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. - -With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with a downstream RKE cluster directly. - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. - -When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. - -### Connecting Directly to Clusters with FQDN Defined - -If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. 
- -Assuming the kubeconfig file is located at `~/.kube/config`: - -``` -kubectl --context -fqdn get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods -``` - -### Connecting Directly to Clusters without FQDN Defined - -If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: -``` -kubectl --context - get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context - get pods -``` diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/_index.md deleted file mode 100644 index 4f1d2e144..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Cluster Autoscaler -weight: 1 ---- - -In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. - -To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. - -Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. - -It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. - -# Cloud Providers - -Cluster Autoscaler provides support to distinct cloud providers. For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) - -### Setting up Cluster Autoscaler on Amazon Cloud Provider - -For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon) diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/_index.md deleted file mode 100644 index d173d444e..000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/_index.md +++ /dev/null @@ -1,580 +0,0 @@ ---- -title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups -weight: 1 ---- - -This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. 
- -We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. - -- [Prerequisites](#prerequisites) -- [1. Create a Custom Cluster](#1-create-a-custom-cluster) -- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) -- [3. Deploy Nodes](#3-deploy-nodes) -- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) - - [Parameters](#parameters) - - [Deployment](#deployment) -- [Testing](#testing) - - [Generating Load](#generating-load) - - [Checking Scale](#checking-scale) - -# Prerequisites - -These elements are required to follow this guide: - -* The Rancher server is up and running -* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles - -### 1. Create a Custom Cluster - -On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: - -* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag -* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag -* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster - - ```sh - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum - ``` - -### 2. Configure the Cloud Provider - -On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. - -1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. - * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:DescribeTags", - "autoscaling:DescribeLaunchConfigurations", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. - * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. 
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - - * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: `K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.x/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H 
"X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--etcd --controlplane" - - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. - * IAM profile: Provides cloud_provider worker integration. - This profile is called `K8sWorkerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] - } - ``` - - * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.x/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--worker" - - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -More info is at [RKE clusters on AWS]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - -### 3. Deploy Nodes - -Once we've configured AWS, let's create VMs to bootstrap our cluster: - -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. 
More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/production/) - * IAM role: `K8sMasterRole` - * Security group: `K8sMasterSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` - -* worker: Define an ASG on EC2 with the following settings: - * Name: `K8sWorkerAsg` - * IAM role: `K8sWorkerRole` - * Security group: `K8sWorkerSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` - * Instances: - * minimum: 2 - * desired: 2 - * maximum: 10 - -Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. - -### 4. Install Cluster-autoscaler - -At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. - -#### Parameters - -This table shows cluster-autoscaler parameters for fine tuning: - -| Parameter | Default | Description | -|---|---|---| -|cluster-name|-|Autoscaled cluster name, if available| -|address|:8085|The address to expose Prometheus metrics| -|kubernetes|-|Kubernetes master location. Leave blank for default| -|kubeconfig|-|Path to kubeconfig file with authorization and master location information| -|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| -|namespace|"kube-system"|Namespace in which cluster-autoscaler run| -|scale-down-enabled|true|Should CA scale down the cluster| -|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| -|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| -|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| -|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| -|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| -|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| -|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| -|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| -|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| -|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| -|max-nodes-total|0|Maximum number of nodes in all node groups. 
Cluster autoscaler will not grow the cluster beyond this number| -|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -cloud-provider|-|Cloud provider type| -|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| -|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| -|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| -|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| -|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| -|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| -|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| -|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| -|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: ::| -|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`| -|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| -|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| -|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| -|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| -|write-status-configmap|true|Should CA write status information to a configmap| -|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| -|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| -|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| -|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| -|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| -|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| -|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| -|regional|false|Cluster is regional| -|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| -|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| -|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| -|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| -|profiling|false|Is debug/pprof endpoint enabled| - -#### Deployment - -Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: - - -```yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: 
kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler -spec: - replicas: 1 - selector: - matchLabels: - app: cluster-autoscaler - template: - metadata: - labels: - app: cluster-autoscaler - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8085' - spec: - serviceAccountName: cluster-autoscaler - tolerations: - - effect: NoSchedule - operator: "Equal" - value: "true" - key: node-role.kubernetes.io/controlplane - nodeSelector: - node-role.kubernetes.io/controlplane: "true" - containers: - - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --v=4 - - --stderrthreshold=info - - --cloud-provider=aws - - --skip-nodes-with-local-storage=false - - --expander=least-waste - - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs/ca-certificates.crt - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - -``` - -Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): - -```sh -kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml -``` - -**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) - -# Testing - -At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. - -### Generating Load - -We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 3 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 1000m - memory: 1024Mi - requests: - cpu: 1000m - memory: 1024Mi -``` - -Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): - -``` -kubectl -n default apply -f test-deployment.yaml -``` - -### Checking Scale - -Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. 
It should scale up until all pods become scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. - -Once scale up is verified, let's check scale down. To do so, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md deleted file mode 100644 index aa7c68fba..000000000 --- a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Cluster Configuration -weight: 2025 ---- - -After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. - -For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members) - -- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) -- [Editing Clusters in the Rancher UI](#editing-clusters-in-the-rancher-ui) -- [Editing Clusters with YAML](#editing-clusters-with-yaml) -- [Updating ingress-nginx](#updating-ingress-nginx) - -### Cluster Management Capabilities by Cluster Type - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -### Editing Clusters in the Rancher UI - -To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. - -In [clusters launched by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - -Note that these options are not available for imported clusters or hosted Kubernetes clusters. - -Option | Description | ---------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes). | - Network Provider | The container networking interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | - Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | - Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | - Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | - Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | - Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | - -### Editing Clusters with YAML - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from File**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the [cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - - - - -### Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. - -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/nodes/_index.md deleted file mode 100644 index 21847e63f..000000000 --- a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -title: Nodes and Node Pools -weight: 2030 ---- - -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) to provision the cluster, there are different node options available. - -> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters). - -This section covers the following topics: - -- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) - - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) - - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) - - [Registered/Imported nodes](#registered-imported-nodes) -- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) -- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) -- [Deleting a node](#deleting-a-node) -- [Scaling nodes](#scaling-nodes) -- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) -- [Cordoning a node](#cordoning-a-node) -- [Draining a node](#draining-a-node) - - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) - - [Grace period](#grace-period) - - [Timeout](#timeout) - - [Drained and cordoned state](#drained-and-cordoned-state) -- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) - -# Node Options Available for Each Cluster Creation Option - -The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. - -{{% tabs %}} -{{% tab "Rancher v2.5" %}} -| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Registered EKS Nodes][4] | [All Other Registered Nodes][5] | Description | -| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | -------------------| ------------------------------------------------------------------ | -| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable. | -| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable _and_ evicts all pods. 
| -| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | ✓ | ✓ | Enter a custom name, description, label, or taints for a node. | -| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | ✓ | ✓ | View API data. | -| [Delete](#deleting-a-node) | ✓ | ✓ | | * | * | Deletes defective nodes from the cluster. | -| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | | Download SSH key in order to SSH into the node. | -| [Node Scaling](#scaling-nodes) | ✓ | | | ✓ | | Scale the number of nodes in the node pool up or down. | - -[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.x/en/cluster-provisioning/registered-clusters/ -[5]: {{}}/rancher/v2.x/en/cluster-provisioning/registered-clusters/ - -\* Delete option accessible via View API - -{{% /tab %}} -{{% tab "Rancher v2.0-v2.4" %}} -| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | -| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | -| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable. | -| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable _and_ evicts all pods. | -| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | ✓ | Enter a custom name, description, label, or taints for a node. | -| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | ✓ | View API data. | -| [Delete](#deleting-a-node) | ✓ | ✓ | | | Deletes defective nodes from the cluster. | -| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key in order to SSH into the node. | -| [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. | - -[1]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ -{{% /tab %}} -{{% /tabs %}} - -### Nodes Hosted by an Infrastructure Provider - -Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) can be scaled up or down if the node pool is edited. - -A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. - -Rancher uses [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. 
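If you prefer the command line, the current scale of a node pool can also be checked with `kubectl`, because nodes created from a pool share the pool's name prefix. A minimal sketch, assuming a hypothetical `worker-` prefix and a kubeconfig for the downstream cluster:

```sh
# Count the nodes whose names start with the node pool's prefix.
# Replace "worker-" with the prefix configured for your pool.
kubectl get nodes --no-headers -o custom-columns=NAME:.metadata.name | grep -c '^worker-'
```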
- -### Nodes Provisioned by Hosted Kubernetes Providers - -Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. - -### Registered/Imported Nodes - -{{% tabs %}} -{{% tab "Rancher v2.5" %}} -Although you can deploy workloads to a [registered cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/registered-clusters/) using Rancher, management of individual cluster nodes is limited to the supported options indicated in the [node options table]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/nodes/#cordoning-a-node). All other options to manage registered cluster nodes must take place outside of Rancher. -{{% /tab %}} -{{% tab "Rancher v2.0-v2.4" %}} -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, management of individual cluster nodes is limited to the supported options indicated in the [node options table]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/nodes/#cordoning-a-node). All other options - to manage imported cluster nodes must take place outside of Rancher. -{{% /tab %}} -{{% /tabs %}} - -# Managing and Editing Individual Nodes - -Editing a node lets you: - -* Change its name -* Change its description -* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) - -To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). - -# Viewing a Node in the Rancher API - -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.x/en/api/). - -# Deleting a Node - -Use **Delete** to remove defective nodes from the cloud provider. - -When you delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) - ->**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. - -# Scaling Nodes - -For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) by using the scale controls. This option isn't available for other cluster types. - -# SSH into a Node Hosted by an Infrastructure Provider - -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. - -1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. - -1. Find the node that you want to remote into. Select **⋮ > Download Keys**. - - **Step Result:** A ZIP file containing files used for SSH is downloaded. - -1. Extract the ZIP file to any location. - -1. Open Terminal. Change your location to the extracted ZIP file. - -1. 
Enter the following command: - - ``` - ssh -i id_rsa root@ - ``` - -# Cordoning a Node - -_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. - -# Draining a Node - -_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. - -- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. - -- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. - -You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. - -However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. - -### Aggressive and Safe Draining Options - -The node draining options are different based on your version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.2.x+" %}} -There are two drain modes: aggressive and safe. - -- **Aggressive Mode** - - In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. - - Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. - -- **Safe Mode** - - If a node has standalone pods or ephemeral data it will be cordoned but not drained. -{{% /tab %}} -{{% tab "Rancher before v2.2.x" %}} - -The following list describes each drain option: - -- **Even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet** - - These types of pods won't get rescheduled to a new node, since they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. Kubernetes forces you to choose this option (which will delete/evict these pods) or drain won't proceed. - -- **Even if there are DaemonSet-managed pods** - - Similar to above, if you have any daemonsets, drain would proceed only if this option is selected. Even when this option is on, pods won't be deleted since they'll immediately be replaced. On startup, Rancher currently has a few daemonsets running by default in the system, so this option is turned on by default. - -- **Even if there are pods using emptyDir** - - If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Similar to the first option, Kubernetes expects the implementation to decide what to do with these pods. 
Choosing this option will delete these pods. -{{% /tab %}} -{{% /tabs %}} - -### Grace Period - -The timeout given to each pod for cleaning things up, so they will have a chance to exit gracefully. For example, pods might need to finish any outstanding requests, roll back transactions, or save state to some external storage. If negative, the default value specified in the pod will be used. - -### Timeout - -The amount of time drain should continue to wait before giving up. - ->**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12. - -### Drained and Cordoned State - -If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. - -If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. - -Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. - ->**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). - -# Labeling a Node to be Ignored by Rancher - -_Available as of 2.3.3_ - -Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. - -Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. - -In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. - -You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. - -> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. - -### Labeling Nodes to be Ignored with the Rancher UI - -To add a node that is ignored by Rancher, - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `ignore-node-name` setting and click **⋮ > Edit.** -1. Enter a name that Rancher will use to ignore nodes. All nodes with this name will be ignored. -1. Click **Save.** - -**Result:** Rancher will not wait to register nodes with this name. In the UI, the node will be displayed with a grayed-out status. The node is still part of the cluster and can be listed with `kubectl`. - -If the setting is changed afterward, the ignored nodes will continue to be hidden. - -### Labeling Nodes to be Ignored with kubectl - -To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: - -``` -cattle.rancher.io/node-status: ignore -``` - -**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. - -If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. - -If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. 
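As a concrete sketch of the kubectl approach, the same label can also be applied to a node that is already registered; the node name below is hypothetical:

```sh
# Label an existing node so Rancher stops treating its health state as meaningful.
kubectl label node my-bigip-node cattle.rancher.io/node-status=ignore

# Verify that the label is present.
kubectl get node my-bigip-node --show-labels
```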
- -If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings under `ignore-node-name`. diff --git a/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md deleted file mode 100644 index 261e1e117..000000000 --- a/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Adding a Pod Security Policy -weight: 80 ---- - -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. - -You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. - -1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**. - -2. Expand **Cluster Options**. - -3. From **Pod Security Policy Support**, select **Enabled**. - - >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - - Rancher ships with [policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. - -5. Click **Save**. - -**Result:** The pod security policy is applied to the cluster and any projects within the cluster. - ->**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. -> ->To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md b/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md deleted file mode 100644 index f034677f0..000000000 --- a/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Projects and Kubernetes Namespaces with Rancher -description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces -weight: 2032 -aliases: - - /rancher/v2.x/en/concepts/projects/ - - /rancher/v2.x/en/tasks/projects/ - - /rancher/v2.x/en/tasks/projects/create-project/ - - /rancher/v2.x/en/tasks/projects/create-project/ ---- - -A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. 
- -A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -This section describes how projects and namespaces work with Rancher. It covers the following topics: - -- [About namespaces](#about-namespaces) -- [About projects](#about-projects) - - [The cluster's default project](#the-cluster-s-default-project) - - [The system project](#the-system-project) -- [Project authorization](#project-authorization) -- [Pod security policies](#pod-security-policies) -- [Creating projects](#creating-projects) -- [Switching between clusters and projects](#switching-between-clusters-and-projects) - -# About Namespaces - -A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) - -> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. - -Namespaces provide the following functionality: - -- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. -- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. - -You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. - -You can assign the following resources directly to namespaces: - -- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/). - -### Role-based access control issues with namespaces and kubectl - -Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. 
- -This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. - -If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.x/en/project-admin/namespaces/) to ensure that you will have permission to access the namespace. - -If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. - -# About Projects - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. - -You can use projects to perform actions such as: - -- Assign users to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). -- Assign resources to the project. -- Assign Pod Security Policies. - -When you create a cluster, two projects are automatically created within it: - -- [Default Project](#the-cluster-s-default-project) -- [System Project](#the-system-project) - -### The Cluster's Default Project - -When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. - -If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. - -If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. - -### The System Project - -_Available as of v2.0.7_ - -When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. - -To open it, open the **Global** menu, and then select the `system` project for your cluster. - -The `system` project: - -- Is automatically created when you provision a cluster. -- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. -- Allows you to add more namespaces or move its namespaces to other projects. -- Cannot be deleted because it's required for cluster operations. - ->**Note:** In clusters where both: -> -> - The Canal network plug-in is in use. -> - The Project Network Isolation option is enabled. -> ->The `system` project overrides the Project Network Isolation option so that it can communicate with other projects, collect logs, and check health. 
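Because project membership is recorded on the namespace itself, you can check which project a namespace belongs to from the command line. A minimal sketch, assuming the `field.cattle.io/projectId` annotation that Rancher writes on project-scoped namespaces (the namespace name is hypothetical):

```sh
# The annotation value has the form <cluster-id>:<project-id>, e.g. c-xxxxx:p-xxxxx.
kubectl get namespace my-namespace -o yaml | grep projectId
```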
- -# Project Authorization - -Standard users are only authorized for project access in two situations: - -- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. -- Standard users can access projects that they create themselves. - -# Pod Security Policies - -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. - -# Creating Projects - -This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. - -1. [Name a new project.](#1-name-a-new-project) -2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) -3. [Recommended: Add project members.](#3-recommended-add-project-members) -4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) - -### 1. Name a New Project - -1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. - -1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. - -1. Enter a **Project Name**. - -### 2. Optional: Select a Pod Security Policy - -This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). - -Assigning a PSP to a project will: - -- Override the cluster's default PSP. -- Apply the PSP to the project. -- Apply the PSP to any namespaces you add to the project later. - -### 3. Recommended: Add Project Members - -Use the **Members** section to provide other users with project access and roles. - -By default, your user is added as the project `Owner`. - ->**Notes on Permissions:** -> ->- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. ->- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). - -To add members: - -1. Click **Add Member**. -1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. -1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - -### 4. Optional: Add Resource Quotas - -_Available as of v2.1.0_ - -Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). - -To add a resource quota, - -1. Click **Add Quota**. -1. Select a Resource Type. 
For more information, see [Resource Quotas.]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden at the individual namespace or container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.x/en/project-admin/resource-quotas/). Note: This option is available as of v2.2.0. -1. Click **Create**. - -**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. - -| Field | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------- | -| Project Limit | The overall resource limit for the project. | -| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. | - -# Switching between Clusters and Projects - -To switch between clusters and projects, use the **Global** drop-down available in the main menu. - -![Global Menu]({{}}/img/rancher/global-menu.png) - -Alternatively, you can switch between projects and clusters using the main menu. - -- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. -- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md deleted file mode 100644 index 2215dc4b2..000000000 --- a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Restoring a Cluster from Backup -weight: 2050 ---- - -_Available as of v2.2.0_ - -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to an S3 compatible target. The advantage of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots), but [one-time snapshots]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows you to restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot), or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). - -As of Rancher v2.4.0, clusters can also be restored to a prior Kubernetes version and cluster configuration.
- -This section covers the following topics: - -- [Viewing Available Snapshots](#viewing-available-snapshots) -- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) -- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -## Viewing Available Snapshots - -You can view a list of all available snapshots for the cluster. - -1. In the **Global** view, navigate to the cluster whose snapshots you want to view. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -## Restoring a Cluster from a Snapshot - -If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. - -The restore process changed in Rancher v2.4.0. - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} - -Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml`. These components allow you to select from the following options when restoring a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring from snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes version, and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -When rolling back to a prior Kubernetes version, the [upgrade strategy options]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. - -> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. In the **Restoration Type** field, choose one of the restore options described above. - -5. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% tab "Rancher before v2.4.0" %}} - -> **Prerequisites:** -> -> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster.
-> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% /tabs %}} - -## Recovering etcd without a Snapshot - -If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: - -1. Keep only one etcd node in the cluster by removing all other etcd nodes. - -2. On the single remaining etcd node, run the following command: - - ``` - $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd - ``` - - This command outputs the command used to run etcd. Save this command to use later. - -3. Stop the original etcd container and rename it to `etcd-old`. - - ``` - $ docker stop etcd - $ docker rename etcd etcd-old - ``` - -4. Take the saved command from Step 2 and revise it: - - - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. - - Add `--force-new-cluster` to the end of the command. - -5. Run the revised command. - -6. After the single node is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.x/en/cluster-admin/tools/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/_index.md deleted file mode 100644 index ed36ea5ae..000000000 --- a/content/rancher/v2.x/en/cluster-admin/tools/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Tools for Logging, Monitoring, and Visibility -weight: 2033 -aliases: - - /rancher/v2.x/en/tools/notifiers-and-alerts/ ---- - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently.
Tools are divided into the following categories: - - - -- [Logging](#logging) -- [Monitoring and Alerts](#monitoring-and-alerts) -- [Istio](#istio) -- [OPA Gatekeeper](#opa-gatekeeper) -- [CIS Scans](#cis-scans) - - - - -# Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd. - -Logging was improved in Rancher v2.5. - -- If you are using Rancher v2.5, refer to the logging documentation [here.]({{}}/rancher/v2.x/en/logging/v2.5) -- If you are using Rancher v2.0.x-v2.4.x, refer to the logging documentation [here.]({{}}/rancher/v2.x/en/logging/v2.0.x-v2.4.x) - -# Monitoring and Alerts - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. - -Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level. - -- If you are using Rancher v2.5, refer to the monitoring documentation [here.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5) -- If you are using Rancher v2.0.x-v2.4.x, refer to the monitoring documentation [here.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x) - -# Istio - -_Available as of v2.3_ - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - -Rancher's integration with Istio was improved in Rancher v2.5. - -- If you are using Rancher v2.5, refer to the Istio documentation [here.]({{}}/rancher/v2.x/en/istio/v2.5) -- If you are using Rancher v2.3.x-v2.4.x, refer to the Istio documentation [here.]({{}}/rancher/v2.x/en/istio/v2.3.x-v2.4.x) - -# OPA Gatekeeper - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.]({{}}/rancher/v2.x/en/opa-gatekeper) - - -# CIS Scans - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark.
- -If you are using Rancher v2.5, refer to the CIS scan documentation [here.]({{}}/rancher/v2.x/en/cis-scans/v2.5) - -If you are using Rancher v2.4, refer to the CIS scan documentation [here.]({{}}/rancher/v2.x/en/cis-scans/v2.4) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md deleted file mode 100644 index 15be6c9a0..000000000 --- a/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Upgrading and Rolling Back Kubernetes -weight: 70 ---- - -Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. - -Rancher uses RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation]({{}}/rke/latest/en/). - -This section covers the following topics: - -- [New Features](#new-features) -- [Tested Kubernetes Versions](#tested-kubernetes-versions) -- [How Upgrades Work](#how-upgrades-work) -- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) -- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) -- [Rolling Back](#rolling-back) -- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) - - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) - - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) - - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) - - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) -- [Troubleshooting](#troubleshooting) - -# New Features - -As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) - -As of Rancher v2.4.0, - -- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. For details, refer to the [section on imported clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) - -- New advanced options are exposed in the Rancher UI for configuring the upgrade strategy of an RKE cluster: **Maximum Worker Nodes Unavailable** and **Drain nodes.** These options leverage the new cluster upgrade process of RKE v1.1.0, in which worker nodes are upgraded in batches, so that applications can remain available during cluster upgrades, under [certain conditions.](#maintaining-availability-for-applications-during-upgrades) - -# Tested Kubernetes Versions - -Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For example, Rancher v2.3.0 was tested with Kubernetes v1.15.4, v1.14.7, and v1.13.11.
For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.3.0/) - -# How Upgrades Work - -RKE v1.1.0 changed the way that clusters are upgraded. - -In this section of the [RKE documentation,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. - - -# Recommended Best Practice for Upgrades - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade Kubernetes version before restoring the etcd snapshot. - -The restore operation will work on a cluster that is not in a healthy or active state. -{{% /tab %}} -{{% tab "Rancher before v2.4" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, restore the cluster from the etcd snapshot. - -The cluster cannot be downgraded to a previous Kubernetes version. -{{% /tab %}} -{{% /tabs %}} - -# Upgrading the Kubernetes Version - -> **Prerequisites:** -> -> - The options below are available only for [Rancher-launched RKE Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) and imported K3s Kubernetes clusters. -> - Before upgrading Kubernetes, [back up your cluster.]({{}}/rancher/v2.x/en/backups) - -1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. - -1. Expand **Cluster Options**. - -1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. - -1. Click **Save**. - -**Result:** Kubernetes begins upgrading for the cluster. - -# Rolling Back - -_Available as of v2.4_ - -A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: - -- [Backing up a cluster]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/#how-snapshots-work) -- [Restoring a cluster from backup]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/#restoring-a-cluster-from-a-snapshot) - -# Configuring the Upgrade Strategy - -As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements]({{}}/rke/latest/en/upgrades/maintaining-availability) are met. - -The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. - -### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI - -From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. - -By default, the maximum number of unavailable worker nodes is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node.
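For reference, the percentage or count configured in the UI corresponds to the `upgrade_strategy` section of the `cluster.yml`; a minimal sketch, assuming the RKE v1.1.0+ upgrade options referenced above (the values are examples):

```yaml
upgrade_strategy:
  max_unavailable_worker: 10%     # or an integer, e.g. 2
  max_unavailable_controlplane: 1
```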
- -To change the default number or percentage of worker nodes, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Maximum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -### Enabling Draining Nodes During Upgrades from the Rancher UI - -By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. - -To enable draining each node during a cluster upgrade, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** -1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/nodes/#aggressive-and-safe-draining-options) -1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have a chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. -1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show the state of etcd and controlplane nodes as drained, even though they are being drained. - -### Maintaining Availability for Applications During Upgrades - -_Available as of RKE v1.1.0_ - -In [this section of the RKE documentation,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. - -### Configuring the Upgrade Strategy in the cluster.yml - -More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. - -For details, refer to [Configuring the Upgrade Strategy]({{}}/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. - -# Troubleshooting - -If a node doesn't come up after an upgrade, the `rke up` command errors out. - -No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. - -If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue.
- -A failed node could be in many different states: - -- Powered off -- Unavailable -- User drains a node while upgrade is in process, so there are no kubelets on the node -- The upgrade itself failed - -If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md deleted file mode 100644 index dccab7709..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: "Kubernetes Persistent Storage: Volumes and Storage Classes" -description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" -weight: 2031 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-storage/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ ---- -When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. - -The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](./how-storage-works) - -### Prerequisites - -To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. - -If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) - -For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. - -### Setting up Existing Storage - -The overall workflow for setting up existing storage is as follows: - -1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. -2. Add a persistent volume (PV) that refers to the persistent storage. -3. Add a persistent volume claim (PVC) that refers to the PV. -4. Mount the PVC as a volume in your workload. - -For details and prerequisites, refer to [this page.](./attaching-existing-storage) - -### Dynamically Provisioning New Storage in Rancher - -The overall workflow for provisioning new storage is as follows: - -1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. -2. Add a persistent volume claim (PVC) that refers to the storage class. -3. Mount the PVC as a volume for your workload. 
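As a rough illustration of steps 1 and 2, the objects Rancher creates for you look roughly like the following (the class name, provisioner, and size are placeholders; the actual provisioner depends on your infrastructure):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-fast
provisioner: kubernetes.io/aws-ebs   # example: in-tree Amazon EBS provisioner
parameters:
  type: gp2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  storageClassName: example-fast
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```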
- -For details and prerequisites, refer to [this page.](./provisioning-new-storage) - -### Longhorn Storage - -[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. - -Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. - -If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/1.0.2/what-is-longhorn/) - -Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. For more information, see [this page.]({{}}/rancher/v2.x/en/longhorn) - -### Provisioning Storage Examples - -We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) - -### GlusterFS Volumes - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) - -### iSCSI Volumes - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](./iscsi-volumes) - -### hostPath Volumes -Before you create a hostPath volume, you need to set up an [extra_bind]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. - -### Migrating vSphere Cloud Provider from In-tree to Out-of-tree - -Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. - -For instructions on how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree) - - -### Related Links - -- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md deleted file mode 100644 index d47810429..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Setting up Existing Storage -weight: 1 ---- - -This section describes how to set up existing persistent storage for workloads in Rancher. - -> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -To set up storage, follow these steps: - -1. [Set up persistent storage.](#1-set-up-persistent-storage) -2. 
[Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) -3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) -4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-volume-claim-as-a-volume-in-your-workload) - -### Prerequisites - -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -### 1. Set up persistent storage - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.]({{}}/rancher/v2.x/en/longhorn) - -### 2. Add a persistent volume that refers to the persistent storage - -These steps describe how to set up a persistent volume at the cluster level in Kubernetes. - -1. From the cluster view, select **Storage > Persistent Volumes**. - -1. Click **Add Volume**. - -1. Enter a **Name** for the persistent volume. - -1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. - -1. Enter the **Capacity** of your volume in gigabytes. - -1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. - -1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. - -1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. - -1. Click **Save**. - -**Result:** Your new persistent volume is created. - -### 3. Add a persistent volume claim that refers to the persistent volume - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. 
Go to the project containing a workload that you want to add a persistent volume claim to. - -1. Then click the **Volumes** tab and click **Add Volume**. (In versions before v2.3.0, click **Workloads** on the main navigation bar, then **Volumes.**) - -1. Enter a **Name** for the volume claim. - -1. Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the workload that you want to add the persistent storage to. - -1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. - -1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 4. Mount the persistent volume claim as a volume in your workload - -Mount PVCs to stateful workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -The following steps describe how to assign existing storage to a new workload that is a stateful set: - -1. From the **Project** view, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a name for the workload. -1. Next to the **Workload Type** field, click **More Options.** -1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. -1. Choose the namespace where the workload will be deployed. -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -The following steps describe how to assign persistent storage to an existing workload: - -1. From the **Project** view, go to the **Workloads** tab. -1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/_index.md deleted file mode 100644 index fbc7451b5..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/_index.md +++ /dev/null @@ -1,433 +0,0 @@ ---- -title: Using an External Ceph Driver -weight: 10 ---- - -These instructions are about using the external Ceph driver in an RKE2 cluster. If you are using RKE, additional steps are required. 
For details, refer to [this section.](#using-the-ceph-driver-with-rke) - -- [Requirements](#requirements) -- [Using the Ceph Driver with RKE](#using-the-ceph-driver-with-rke) -- [Installing the ceph-csi driver on an RKE2 cluster](#installing-the-ceph-csi-driver-on-an-rke2-cluster) -- [Install the ceph-csi driver using Helm](#install-the-ceph-csi-driver-using-helm) -- [Creating RBD Ceph Resources](#creating-rbd-ceph-resources) -- [Configure RBD Ceph Access Secrets](#configure-rbd-ceph-access-secrets) - - [User Account](#user-account) - - [Admin Account](#admin-account) -- [Create RBD Testing Resources](#create-rbd-testing-resources) - - [Using RBD in Pods](#using-rbd-in-pods) - - [Using RBD in Persistent Volumes](#using-rbd-in-persistent-volumes) - - [Using RBD in Storage Classes](#using-rbd-in-storage-classes) - - [RKE2 Server/Master Provisioning](#rke2-server-master-provisioning) - - [RKE2 Agent/Worker provisioning](#rke2-agent-worker-provisioning) -- [Tested Versions](#tested-versions) -- [Troubleshooting](#troubleshooting) - -# Requirements - -Make sure ceph-common and xfsprogs packages are installed on SLE worker nodes. - -# Using the Ceph Driver with RKE - -The resources below are fully compatible with RKE based clusters, but there is a need to do an additional kubelet configuration for RKE. - -On RKE clusters, the kubelet component is running in a Docker container and doesn't have access to the host's kernel modules as rbd and libceph by default. - -To solve this limitation, you can either run `modprobe rbd` on worker nodes, or configure the kubelet containers to automatically mount the `/lib/modules` directory from the host into the container. - -For the kubelet configuration, put the following lines into the `cluster.yml` file prior to RKE cluster provisioning. You can also modify the `cluster.yml` later in the Rancher UI by clicking on **Edit Cluster > Edit as YAML** and restarting the worker nodes. - -```yaml -services: - kubelet: - extra_binds: - - '/lib/modules:/lib/modules:ro' -``` - -For more information about the `extra_binds` directive, refer to [this section.]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds) - -# Installing the ceph-csi driver on an RKE2 cluster - -> **Note:** These steps are needed for dynamic RBD provisioning only. - -For more information about the `ceph-csi-rbd` chart, refer to [this page.](https://github.com/ceph/ceph-csi/blob/devel/charts/ceph-csi-rbd/README.md) - -To get details about your SES cluster, run: - -``` -ceph mon dump -``` - -Read its output: - -``` -dumped monmap epoch 3 -epoch 3 -fsid 79179d9d-98d8-4976-ab2e-58635caa7235 -last_changed 2021-02-11T10:56:42.110184+0000 -created 2021-02-11T10:56:22.913321+0000 -min_mon_release 15 (octopus) -0: [v2:10.85.8.118:3300/0,v1:10.85.8.118:6789/0] mon.a -1: [v2:10.85.8.123:3300/0,v1:10.85.8.123:6789/0] mon.b -2: [v2:10.85.8.124:3300/0,v1:10.85.8.124:6789/0] mon.c -``` - -Later you'll need the fsid and mon addresses values. 
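If you only want those two values, the standard Ceph CLI can print them directly; a small sketch, assuming the same admin keyring used for `ceph mon dump`:

```
ceph fsid
ceph mon dump | grep '^[0-9]'
```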
- -# Install the ceph-csi Driver Using Helm - -Run these commands: - -``` -helm repo add ceph-csi https://ceph.github.io/csi-charts -helm repo update -helm search repo ceph-csi -l -helm inspect values ceph-csi/ceph-csi-rbd > ceph-csi-rbd-values.yaml -``` - -Modify the `ceph-csi-rbd-values.yaml` file and keep there only the required changes: - -```yaml -# ceph-csi-rbd-values.yaml -csiConfig: - - clusterID: "79179d9d-98d8-4976-ab2e-58635caa7235" - monitors: - - "10.85.8.118:6789" - - "10.85.8.123:6789" - - "10.85.8.124:6789" -provisioner: - name: provisioner - replicaCount: 2 -``` - -Make sure the ceph monitors are reachable from the RKE2 cluster, for example, by ping. - -``` -kubectl create namespace ceph-csi-rbd -helm install --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml -kubectl rollout status deployment ceph-csi-rbd-provisioner -n ceph-csi-rbd -helm status ceph-csi-rbd -n ceph-csi-rbd -``` - -in case you'd like to modify the configuration directly via Helm, you may adapt the `ceph-csi-rbd-values.yaml` file and call: - -``` -helm upgrade \ - --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml -``` - -# Creating RBD Ceph Resources - -``` -# Create a ceph pool: -ceph osd pool create myPool 64 64 - -# Create a block device pool: -rbd pool init myPool - -# Create a block device image: -rbd create -s 2G myPool/image - -# Create a block device user and record the key: -ceph auth get-or-create-key client.myPoolUser mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=myPool" | tr -d '\n' | base64 -QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== - -# Encode the ceph user myPoolUser into a bash64 hash: -echo "myPoolUser" | tr -d '\n' | base64 -bXlQb29sVXNlcg== - -# Create a block device admin user and record the key: -ceph auth get-or-create-key client.myPoolAdmin mds 'allow *' mgr 'allow *' mon 'allow *' osd 'allow * pool=myPool' | tr -d '\n' | base64 -QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== - -# Encode the ceph user myPoolAdmin into a bash64 hash: -echo "myPoolAdmin" | tr -d '\n' | base64 -bXlQb29sQWRtaW4= -``` -# Configure RBD Ceph Access Secrets - -### User Account - -For static RBD provisioning (the image within the ceph pool must exist), run these commands: - -``` -cat > ceph-user-secret.yaml << EOF -apiVersion: v1 -kind: Secret -metadata: - name: ceph-user - namespace: default -type: kubernetes.io/rbd -data: - userID: bXlQb29sVXNlcg== - userKey: QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== -EOF - -kubectl apply -f ceph-user-secret.yaml -``` - -### Admin Account - -For dynamic RBD provisioning (used for automatic image creation within a given ceph pool), run these commands: - -``` -cat > ceph-admin-secret.yaml << EOF -apiVersion: v1 -kind: Secret -metadata: - name: ceph-admin - namespace: default -type: kubernetes.io/rbd -data: - userID: bXlQb29sQWRtaW4= - userKey: QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== -EOF - -kubectl apply -f ceph-admin-secret.yaml -``` - -# Create RBD Testing Resources - -### Using RBD in Pods - -``` -# pod -cat > ceph-rbd-pod-inline.yaml << EOF -apiVersion: v1 -kind: Pod -metadata: - name: ceph-rbd-pod-inline -spec: - containers: - - name: ceph-rbd-pod-inline - image: busybox - command: ["sleep", "infinity"] - volumeMounts: - - mountPath: /mnt/ceph_rbd - name: volume - volumes: - - name: volume - rbd: - monitors: - - 10.85.8.118:6789 - - 10.85.8.123:6789 - - 10.85.8.124:6789 - pool: myPool - image: 
image - user: myPoolUser - secretRef: - name: ceph-user - fsType: ext4 - readOnly: false -EOF - -kubectl apply -f ceph-rbd-pod-inline.yaml -kubectl get pod -kubectl exec pod/ceph-rbd-pod-inline -- df -k | grep rbd -``` - -### Using RBD in Persistent Volumes - -``` -# pod-pvc-pv -cat > ceph-rbd-pod-pvc-pv-allinone.yaml << EOF -apiVersion: v1 -kind: PersistentVolume -metadata: - name: ceph-rbd-pv -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - rbd: - monitors: - - 10.85.8.118:6789 - - 10.85.8.123:6789 - - 10.85.8.124:6789 - pool: myPool - image: image - user: myPoolUser - secretRef: - name: ceph-user - fsType: ext4 - readOnly: false ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: ceph-rbd-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi ---- -apiVersion: v1 -kind: Pod -metadata: - name: ceph-rbd-pod-pvc-pv -spec: - containers: - - name: ceph-rbd-pod-pvc-pv - image: busybox - command: ["sleep", "infinity"] - volumeMounts: - - mountPath: /mnt/ceph_rbd - name: volume - volumes: - - name: volume - persistentVolumeClaim: - claimName: ceph-rbd-pvc -EOF - -kubectl apply -f ceph-rbd-pod-pvc-pv-allinone.yaml -kubectl get pv,pvc,pod -kubectl exec pod/ceph-rbd-pod-pvc-pv -- df -k | grep rbd -``` - -### Using RBD in Storage Classes - -This example is for dynamic provisioning. The ceph-csi driver is needed. - -``` -# pod-pvc-sc -cat > ceph-rbd-pod-pvc-sc-allinone.yaml < /root/.bashrc << EOF -export PATH=$PATH:/var/lib/rancher/rke2/bin/ -export KUBECONFIG=/etc/rancher/rke2/rke2.yaml -EOF - -cat /var/lib/rancher/rke2/server/node-token -token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED -``` - -### RKE2 Agent/Worker provisioning - -``` -mkdir -p /etc/rancher/rke2/ - -cat > /etc/rancher/rke2/config.yaml << EOF -server: https://10.100.103.23:9345 -token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED -EOF - -curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - -systemctl enable --now rke2-agent.service -``` - -The cluster can be imported into Rancher from the Rancher UI by clicking **Global/Add Cluster > Other Cluster.** Then run the provided kubectl command on the server/master node. - -# Tested Versions - -OS for running RKE2 nodes: JeOS SLE15-SP2 with installed kernel-default-5.3.18-24.49 - -``` -kubectl version -Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-22T12:00:00Z", GoVersion:"go1.13.11", Compiler:"gc", Platform:"linux/amd64"} -Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7+rke2r1", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-20T01:50:52Z", GoVersion:"go1.15.5b5", Compiler:"gc", Platform:"linux/amd64"} - -helm version -version.BuildInfo{Version:"3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.12"} -``` - -Kubernetes version on RKE2 cluster: v1.19.7+rke2r1 - -# Troubleshooting - -In case you are using SUSE's ceph-rook based on SES7, it might be useful to expose the monitors on hostNetwork by editing `rook-1.4.5/ceph/cluster.yaml` and setting `spec.network.hostNetwork=true`. 
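In practice that is a small edit to the CephCluster manifest; a sketch of the relevant excerpt, based on the `spec.network.hostNetwork` setting mentioned above:

```yaml
# rook-1.4.5/ceph/cluster.yaml (excerpt)
spec:
  network:
    hostNetwork: true
```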
- -Also, for operating the ceph-rook cluster, it is useful to deploy a toolbox on the Kubernetes cluster where ceph-rook is provisioned by running `kubectl apply -f rook-1.4.5/ceph/toolbox.yaml`. Then all the Ceph-related commands can be executed in the toolbox pod, for example, by running `kubectl exec -it -n rook-ceph rook-ceph-tools-686d8b8bfb-2nvqp -- bash` - -Basic commands for operating Ceph: - -``` -ceph osd pool stats -ceph osd pool delete myPool myPool --yes-i-really-really-mean-it -rbd list -p myPool -> csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -> image -``` - -Delete the image: `rbd rm csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -p myPool` - -CephFS commands in rook toolbox: - -``` -ceph -s -ceph fs ls -ceph fs fail cephfs -ceph fs rm cephfs --yes-i-really-mean-it -ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it -ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it -``` - -To prepare a cephfs filesystem, you can run this command on a rook cluster: - -``` -kubectl apply -f rook-1.4.5/ceph/filesystem.yaml -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md deleted file mode 100644 index 5bb96aa36..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Provisioning Storage Examples -weight: 3053 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/ - - /rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/ ---- - -Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. - -For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: - -- [NFS](./nfs) -- [vSphere](./vsphere) -- [EBS](./ebs) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md deleted file mode 100644 index b854daf0e..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Creating Persistent Storage in Amazon's EBS -weight: 3053 ---- - -This section describes how to set up Amazon's Elastic Block Store in EC2. - -1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** -1. Click **Create Volume.** -1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. -1. Click **Create Volume.** -1. Click **Close.** - -**Result:** Persistent storage has been created.
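If you prefer the command line, an equivalent volume can be created with the AWS CLI; a sketch (the availability zone, size, and volume type are placeholders, and the zone must match the instance the volume will be attached to):

```
aws ec2 create-volume --availability-zone us-east-1a --size 20 --volume-type gp2
```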
- -For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md deleted file mode 100644 index 632169475..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: NFS Storage -weight: 3054 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ ---- - -Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. - ->**Note:** -> ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/). -> ->- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. - ->**Recommended:** To simplify the process of managing firewall rules, use NFSv4. - -1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. - -1. Enter the following command: - - ``` - sudo apt-get install nfs-kernel-server - ``` - -1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. - - ``` - mkdir -p /nfs && chown nobody:nogroup /nfs - ``` - - The `-p /nfs` parameter creates a directory named `nfs` at root. - - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. - -1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. - - 1. Open `/etc/exports` using your text editor of choice. - 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster. Follow each address and its accompanying parameters with a single space that is a delimiter. - - ``` - /nfs (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) - ``` - - **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` - - 1. Update the NFS table by entering the following command: - - ``` - exportfs -ra - ``` - -1. Open the ports used by NFS. - - 1. To find out what ports NFS is using, enter the following command: - - ``` - rpcinfo -p | grep nfs - ``` - 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: - - ``` - sudo ufw allow 2049 - ``` - -**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. - -## What's Next? - -Within Rancher, add the NFS server as a storage volume and/or storage class. After adding the server, you can use it for storage for your deployments. 
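To recap step 4 above, a complete `/etc/exports` entry pairs the exported directory with each client node and its export options; a sketch with placeholder IP addresses:

```
/nfs 10.212.50.12(rw,sync,no_subtree_check) 10.212.50.13(rw,sync,no_subtree_check) 10.212.50.14(rw,sync,no_subtree_check)
```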
diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md deleted file mode 100644 index 676861820..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: vSphere Storage -weight: 3055 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ ---- - -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). - -In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -- [Prerequisites](#prerequisites) -- [Creating a StorageClass](#creating-a-storageclass) -- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) -- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) -- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) - -### Prerequisites - -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/). - -### Creating a StorageClass - -> **Note:** -> -> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. - -1. From the Global view, open the cluster where you want to provide vSphere storage. -2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. -3. Enter a **Name** for the class. -4. Under **Provisioner**, select **VMWare vSphere Volume**. - - {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} - -5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. -5. Click **Save**. - -### Creating a Workload with a vSphere Volume - -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). -2. For **Workload Type**, select **Stateful set of 1 pod**. -3. Expand the **Volumes** section and click **Add Volume**. -4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. -5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. -6. Enter the required **Capacity** for the volume. Then click **Define**. - - {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} - -7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. -8. 
Click **Launch** to create the workload. - -### Verifying Persistence of the Volume - -1. From the context menu of the workload you just created, click **Execute Shell**. -2. Note the directory at root where the volume has been mounted (in this case `/persistent`). -3. Create a file in the volume by executing the command `touch /<volume-mount-path>/data.txt`. -4. **Close** the shell window. -5. Click on the name of the workload to reveal detailed information. -6. Open the context menu next to the Pod in the *Running* state. -7. Delete the Pod by selecting **Delete**. -8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. -9. Once the replacement pod is running, click **Execute Shell**. -10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /<volume-mount-path>`. Note that the file you created earlier is still present. - - ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) - -### Why to Use StatefulSets Instead of Deployments - -You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. - -Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. - -Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. - -### Related Links - -- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md deleted file mode 100644 index c9e99f682..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: GlusterFS Volumes -weight: 5000 ---- - -> This section only applies to [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The `kubelet` logs will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. 
There are two requirements before you can change the cluster configuration: - -- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) -- The `systemd-run` binary needs to be compatible with the Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) - -``` -docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version -``` - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/usr/bin/systemd-run:/usr/bin/systemd-run" -``` - -After the cluster has finished provisioning, you can check the `kubelet` container logs to see if the functionality is activated by looking for the following log line: - -``` -Detected OS with systemd -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md deleted file mode 100644 index fcb87bc10..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: How Persistent Storage Works -weight: 1 -aliases: - - /rancher/v2.x/en/tasks/workloads/add-persistent-volume-claim ---- - -A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. - -There are two ways to use persistent storage in Kubernetes: - -- Use an existing persistent volume -- Dynamically provision new persistent volumes - -To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. - -For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. - -![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) - -For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) - -This section covers the following topics: - -- [About persistent volume claims](#about-persistent-volume-claims) - - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) -- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) - - [Binding PVs to PVCs](#binding-pvs-to-pvcs) -- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) - -# About Persistent Volume Claims - -Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workload as a volume so that the workload can claim its specified share of the persistent storage. 
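As a concrete, hypothetical illustration of such a request (the name, namespace, and size below are placeholders, not values from any particular Rancher screen), a PVC created directly with `kubectl` is a small object like this:

```
# Hypothetical example: a claim for at least 10Gi of storage that can be
# mounted read-write by a single node.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```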
- -To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. - -Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions before v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future. - -### PVCs are Required for Both New and Existing Persistent Storage - -A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. - -If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. - -If a workload should request new storage, the workload mounts a PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. - -Rancher lets you create as many PVCs within a project as you'd like. - -You can mount PVCs to a deployment as you create it, or later, after the deployment is running. - -# Setting up Existing Storage with a PVC and PV - -Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. - -PVs can represent a physical disk or file system that you host on premises, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. - -### Binding PVs to PVCs - -When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - -> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. - -In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a PV that has at least the amount of disk space required by the PVC. 
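To illustrate the point above that a PVC is mounted the same way as any other Kubernetes volume, here is a minimal, hypothetical pod spec that mounts the claim sketched earlier; the pod name, image, and mount path are placeholders:

```
# Hypothetical example: a pod that redeems the claim above and mounts it at /data.
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
  namespace: default
spec:
  containers:
    - name: app
      image: nginx                 # placeholder image
      volumeMounts:
        - name: data
          mountPath: /data         # path inside the container where the volume appears
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: example-pvc     # the PVC from the earlier sketch
```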
- -To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. - -# Provisioning New Storage with a PVC and Storage Class - -Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. - -For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. - -The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. - diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md deleted file mode 100644 index 049a65421..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: iSCSI Volumes -weight: 6000 ---- - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. - -Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. - -If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: - -| Platform | Package Name | Install Command | -| ------------- | ----------------------- | -------------------------------------- | -| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | -| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | - - -After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. 
- -``` -services: - kubelet: - extra_binds: - - "/etc/iscsi:/etc/iscsi" - - "/sbin/iscsiadm:/sbin/iscsiadm" -``` diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md deleted file mode 100644 index ad1bfc9e0..000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Dynamically Provisioning New Storage in Rancher -weight: 2 ---- - -This section describes how to provision new persistent storage for workloads in Rancher. - -This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.]({{}}/rancher/v2.x/en/longhorn) - -To provision new storage for your workloads, follow these steps: - -1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) -2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) -3. [Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) - -### Prerequisites - -- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- Make sure your storage provisioner is available to be enabled. - -The following storage provisioners are enabled by default: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/) - -### 1. Add a storage class and configure it to use your storage - -These steps describe how to set up a storage class at the cluster level. - -1. Go to the **Cluster Explorer** of the cluster for which you want to dynamically provision persistent storage volumes. - -1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. - -1. Enter a `Name` for your storage class. - -1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. - -1. 
From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. - -1. Click `Save`. - -**Result:** The storage class is available to be consumed by a PVC. - -For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). - -### 2. Add a persistent volume claim that refers to the storage class - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the **Cluster Manager** to the project containing a workload that you want to add a PVC to. - -1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. - -1. Enter a **Name** for the volume claim. - -1. Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. - -1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** - -1. Go to the **Storage Class** drop-down and select the storage class that you created. - -1. Enter a volume **Capacity**. - -1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 3. Mount the persistent volume claim as a volume for your workload - -Mount PVCs to workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -To attach the PVC to a new workload, - -1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). -1. For **Workload Type**, select **Stateful set of 1 pod**. -1. Expand the **Volumes** section and click **Add Volume > Use an Existing Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -To attach the PVC to an existing workload, - -1. Go to the project that has the workload that will have the PVC attached. -1. Go to the workload that will have persistent storage and click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Use an Existing Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. 
If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. diff --git a/content/rancher/v2.x/en/cluster-provisioning/_index.md b/content/rancher/v2.x/en/cluster-provisioning/_index.md deleted file mode 100644 index 620bce21a..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/_index.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Setting up Kubernetes Clusters in Rancher -description: Provisioning Kubernetes Clusters -weight: 7 -aliases: - - /rancher/v2.x/en/concepts/clusters/ - - /rancher/v2.x/en/concepts/clusters/cluster-providers/ - - /rancher/v2.x/en/tasks/clusters/ ---- - -Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. - -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) page. - -This section covers the following topics: - - - -- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) -- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) - - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) - - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) -- [Registering Existing Clusters](#registering-existing-clusters) -- [Importing Existing Clusters](#importing-existing-clusters) - - - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} - -# Setting up Clusters in a Hosted Kubernetes Provider - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters) - -# Launching Kubernetes with Rancher - -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. - -In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. - -These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. - -If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. 
- -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider - -Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. - -One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. - -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. - -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. - -You can bring any nodes you want to Rancher and use them to create a cluster. - -These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. - -# Importing Existing Clusters - -_Available from Rancher v2.0.x-v2.4.x_ - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. Other Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -In Rancher v2.4, it became possible to import a K3s cluster and upgrade Kubernetes by editing the cluster in the Rancher UI. - -For more information, refer to the section on [importing existing clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) - -### Importing and Editing K3s Clusters - -_Available as of Rancher v2.4.0_ - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. 
- -For more information, refer to the section on [imported K3s clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) - -# Registering Existing Clusters - -_Available as of v2.5_ - -The cluster registration feature replaces the feature to import clusters. - -Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. - -For more information, see [this page.](./registered-clusters) diff --git a/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md b/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md deleted file mode 100644 index 80271f0ae..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | -| --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | ✓ | ✓ | * | -| [Managing Nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.x/en/catalog/) | ✓ | ✓ | ✓ | -| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)]({{}}/rancher/v2.x/en/cluster-admin/tools/) | ✓ | ✓ | ✓ | -| [Cloning Clusters]({{}}/rancher/v2.x/en/cluster-admin/cloning-clusters/)| ✓ | ✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/) | ✓ | | | -| [Ability to back up your Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/) | ✓ | | | -| [Ability to recover and restore etcd]({{}}/rancher/v2.x/en/cluster-admin/restoring-etcd/) | ✓ | | | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.x/en/cluster-admin/pod-security-policy/) | ✓ | | | -| [Running Security Scans]({{}}/rancher/v2.x/en/security/security-scan/) | ✓ | | | - -\* Cluster configuration options can't be edited for imported clusters, except for K3s clusters. 
\ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md deleted file mode 100644 index 5bcbab42f..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Setting up Clusters from Hosted Kubernetes Providers -weight: 3 ---- - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider. - -Rancher supports the following Kubernetes providers: - -Kubernetes Providers | Available as of | - --- | --- | -[Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) | v2.0.0 | -[Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) | v2.0.0 | -[Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) | v2.0.0 | -[Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) | v2.2.0 | -[Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) | v2.2.0 | -[Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) | v2.2.0 | - -## Hosted Kubernetes Provider Authentication - -When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: - -- [Creating a GKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce) diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md deleted file mode 100644 index 75edd05e4..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Creating an Aliyun ACK Cluster -shortTitle: Alibaba Cloud Container Service for Kubernetes -weight: 2120 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). 
Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. - -## Prerequisites - ->**Note** ->Deploying to ACK will incur charges. - -1. In Aliyun, activate the following services in their respective consoles. - - - [Container Service](https://cs.console.aliyun.com) - - [Resource Orchestration Service](https://ros.console.aliyun.com) - - [RAM](https://ram.console.aliyun.com) - -2. Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Refer to the official Alibaba Cloud documentation on [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. - -3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). - -4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. - -## Create an ACK Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Alibaba ACK**. - -1. Enter a **Cluster Name**. - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. - -1. Click **Next: Configure Cluster**, then choose the cluster type, the Kubernetes version, and the availability zone. - -1. If you choose **Kubernetes** as the cluster type, click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. - -1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. - -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md deleted file mode 100644 index 666b03b74..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Creating an AKS Cluster -shortTitle: Azure Kubernetes Service -weight: 2115 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-azure-container-service/ ---- - -You can use Rancher to create a cluster hosted in Microsoft Azure Kubernetes Service (AKS). - -## Prerequisites in Microsoft Azure - ->**Note** ->Deploying to AKS will incur charges. 
- -To interact with Azure APIs, an AKS cluster requires an Azure Active Directory (AD) service principal. The service principal is needed to dynamically create and manage other Azure resources, and it provides credentials for your cluster to communicate with AKS. For more information about the service principal, refer to the [AKS documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal). - -Before creating the service principal, you need to obtain the following information from the [Microsoft Azure Portal](https://portal.azure.com): - -- Your subscription ID -- Your tenant ID -- An app ID (also called a client ID) -- Client secret -- A resource group - -The sections below describe how to set up these prerequisites using either the Azure command line tool or the Azure portal. - -### Setting Up the Service Principal with the Azure Command Line Tool - -You can create the service principal by running this command: - -``` -az ad sp create-for-rbac --skip-assignment -``` - -The result should show information about the new service principal: -``` -{ - "appId": "xxxx--xxx", - "displayName": "<SERVICE_PRINCIPAL_NAME>", - "name": "http://<SERVICE_PRINCIPAL_NAME>", - "password": "<SERVICE_PRINCIPAL_PASSWORD>", - "tenant": "<TENANT_ID>" - } -``` - -You also need to add roles to the service principal so that it has privileges for communication with the AKS API. It also needs access to create and list virtual networks. - -Below is an example command for assigning the Contributor role to a service principal. Contributors can manage anything on AKS but cannot give access to others: - -``` -az role assignment create \ - --assignee $appId \ - --scope /subscriptions/$<SUBSCRIPTION_ID>/resourceGroups/$<RESOURCE_GROUP> \ - --role Contributor -``` - -You can also create the service principal and give it Contributor privileges by combining the two commands into one. In this command, the scope needs to provide a full path to an Azure resource: - -``` -az ad sp create-for-rbac \ - --scope /subscriptions/$<SUBSCRIPTION_ID>/resourceGroups/$<RESOURCE_GROUP> \ - --role Contributor -``` - -### Setting Up the Service Principal from the Azure Portal - -You can also follow these instructions to set up a service principal and give it role-based access from the Azure Portal. - -1. Go to the Microsoft Azure Portal [home page](https://portal.azure.com). - -1. Click **Azure Active Directory.** - -1. Click **App registrations.** - -1. Click **New registration.** - -1. Enter a name. This will be the name of your service principal. - -1. Optional: Choose which accounts can use the service principal. - -1. Click **Register.** - -1. You should now see the name of your service principal under **Azure Active Directory > App registrations.** - -1. Click the name of your service principal. Take note of the tenant ID and application ID (also called app ID or client ID) so that you can use them when provisioning your AKS cluster. Then click **Certificates & secrets.** - -1. Click **New client secret.** - -1. Enter a short description, pick an expiration time, and click **Add.** Take note of the client secret so that you can use it when provisioning the AKS cluster. - -**Result:** You have created a service principal and you should be able to see it listed in the **Azure Active Directory** section under **App registrations.** You still need to give the service principal access to AKS. - -To give role-based access to your service principal, - -1. Click **All Services** in the left navigation bar. Then click **Subscriptions.** - -1. Click the name of the subscription that you want to associate with your Kubernetes cluster. 
Take note of the subscription ID so that you can use it when provisioning your AKS cluster. - -1. Click **Access Control (IAM).** - -1. In the **Add role assignment** section, click **Add.** - -1. In the **Role** field, select a role that will have access to AKS. For example, you can use the **Contributor** role, which has permission to manage everything except for giving access to other users. - -1. In the **Assign access to** field, select **Azure AD user, group, or service principal.** - -1. In the **Select** field, select the name of your service principal and click **Save.** - -**Result:** Your service principal now has access to AKS. - - -## Create the AKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Azure Kubernetes Service**. - -1. Enter a **Cluster Name**. - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Use your subscription ID, tenant ID, app ID, and client secret to give your cluster access to AKS. If you don't have all of that information, you can retrieve it using these instructions: - - **App ID and tenant ID:** To get the app ID and tenant ID, you can go to the Azure Portal, then click **Azure Active Directory**, then click **App registrations,** then click the name of the service principal. The app ID and tenant ID are both on the app registration detail page. - - **Client secret:** If you didn't copy the client secret when creating the service principal, you can get a new one by going to the app registration detail page, then clicking **Certificates & secrets**, then clicking **New client secret.** - - **Subscription ID:** The subscription ID is available in the portal under **All services > Subscriptions.** - -1. Use **Cluster Options** to choose the Kubernetes version, the network provider, and whether to enable project network isolation. To see more cluster options, click on **Show advanced options.** - -1. Complete the **Account Access** form using the output from your Service Principal. This information is used to authenticate with Azure. - -1. Use **Nodes** to provision each node in your cluster and choose a geographical region. - - [Microsoft Documentation: How to create and use an SSH public and private key pair](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys) -
-1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md deleted file mode 100644 index f59a024d8..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Creating a Huawei CCE Cluster -shortTitle: Huawei Cloud Kubernetes Service -weight: 2130 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. - -## Prerequisites in Huawei - ->**Note** ->Deploying to CCE will incur charges. - -1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). - -2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). - -## Limitations - -Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. - -## Create the CCE Cluster - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Huawei CCE**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key **Secret Key**. Then Click **Next: Configure cluster**. Fill in the cluster configuration. For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) -1. Fill the following node configuration of the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) -1. Click **Create** to create the CCE cluster. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -# Huawei CCE Configuration - -|Settings|Description| -|---|---| -| Cluster Type | Which type or node you want to include into the cluster, `VirtualMachine` or `BareMetal`. 
| -| Description | The description of the cluster. | -| Master Version | The Kubernetes version. | -| Management Scale Count | The max node count of the cluster. The options are 50, 200, and 1000. The larger the scale count, the higher the cost. | -| High Availability | Enable master node high availability. A cluster with high availability enabled costs more. | -| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` are supported in the `VirtualMachine` type, and `underlay_ipvlan` is supported in the `BareMetal` type. | -| Container Network CIDR | Network CIDR for the cluster. | -| VPC Name | The name of the VPC into which the cluster is deployed. Rancher will create one if this is left blank. | -| Subnet Name | The name of the subnet into which the cluster is deployed. Rancher will create one if this is left blank. | -| External Server | This option is reserved for future use, when CCE cluster public access via the API can be enabled. For now, it is always disabled. | -| Cluster Label | The labels for the cluster. | -| Highway Subnet | This option is only supported in the `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | - -**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -# Node Configuration - -|Settings|Description| -|---|---| -| Zone | The availability zone where the cluster node(s) are deployed. | -| Billing Mode | The billing mode for the cluster node(s). In the `VirtualMachine` type, only `Pay-per-use` is supported. In `BareMetal`, you can choose `Pay-per-use` or `Yearly/Monthly`. | -| Validity Period | This option only shows in `Yearly/Monthly` billing mode. It specifies how long you want to pay for the cluster node(s). | -| Auto Renew | This option only shows in `Yearly/Monthly` billing mode. It determines whether the cluster node(s) renew the `Yearly/Monthly` payment automatically. | -| Data Volume Type | Data volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | -| Data Volume Size | Data volume size for the cluster node(s). | -| Root Volume Type | Root volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | -| Root Volume Size | Root volume size for the cluster node(s). | -| Node Flavor | The node flavor of the cluster node(s). The flavor list in the Rancher UI is fetched from Huawei Cloud and includes all the supported node flavors. | -| Node Count | The node count of the cluster. | -| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | -| SSH Key Name | The SSH key for the cluster node(s). | -| EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) will not bind a public IP. `Create EIP` means that the cluster node(s) will bind one or more newly created EIPs after provisioning, and more options will be shown in the UI to set the parameters of the EIPs to create. `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | -| EIP Count | This option will only be shown when `Create EIP` is selected. 
It means how many EIPs you want to create for the node(s). | -| EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | -| EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | -| EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | -| EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The BandWidth of the EIPs. | -| Authentication Mode | It means enabling `RBAC` or also enabling `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate which is used for authenticating proxy will be also required. | -| Node Label | The labels for the cluster node(s). | \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md deleted file mode 100644 index 96145e926..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ /dev/null @@ -1,742 +0,0 @@ ---- -title: Creating an EKS Cluster -shortTitle: Amazon EKS -weight: 2110 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ ---- - -Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). - -- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) - - [Amazon VPC](#amazon-vpc) - - [IAM Policies](#iam-policies) -- [Architecture](#architecture) -- [Create the EKS Cluster](#create-the-eks-cluster) -- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) -- [Troubleshooting](#troubleshooting) -- [AWS Service Events](#aws-service-events) -- [Security and Compliance](#security-and-compliance) -- [Tutorial](#tutorial) -- [Minimum EKS Permissions](#minimum-eks-permissions) - - [Service Role Permissions](#service-role-permissions) - - [VPC Permissions](#vpc-permissions) -- [Syncing](#syncing) - -# Prerequisites in Amazon Web Services - ->**Note** ->Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). - -To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). - -### Amazon VPC - -An Amazon VPC is required to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. You can set one up yourself and provide it during cluster creation in Rancher. If you do not provide one during creation, Rancher will create one. 
For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). - -### IAM Policies - -Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. - -1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - -2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. - -3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. - -> **Note:** It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. - -For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). - -# Architecture - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -# Create the EKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EKS**. - -1. Enter a **Cluster Name.** - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) - -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -# EKS Cluster Configuration Reference - -### Changes in Rancher v2.5 - -More EKS options can be configured when you create an EKS cluster in Rancher, including the following: - -- Managed node groups -- Desired size, minimum size, maximum size (requires the Cluster Autoscaler to be installed) -- Control plane logging -- Secrets encryption with KMS - -The following capabilities have been added for configuring EKS clusters in Rancher: - -- GPU support -- Exclusively use managed nodegroups that come with the most up-to-date AMIs -- Add new nodes -- Upgrade nodes -- Add and remove node groups -- Disable and enable private access -- Add restrictions to public access -- Use your cloud credentials to create the EKS cluster instead of passing in your access key and secret key - -Due to the way that the cluster data is synced with EKS, if the cluster is modified from another source, such as in the EKS console, and in Rancher within five minutes, it could cause some changes to be overwritten. For information about how the sync works and how to configure it, refer to [this section](#syncing). - -{{% tabs %}} -{{% tab "Rancher v2.5.6+" %}} - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). 
- -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). - -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. - -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. 
- -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -#### Bring your own launch template - -A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. - -| Option | Description | Required/Optional | -| ------ | ----------- | ----------------- | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | -| Image ID | Specify a custom AMI for the nodes. Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | -| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | -| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | -| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | -| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | - -#### Rancher-managed launch templates - -If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. 
Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. - -#### Custom AMIs - -If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. - -If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. - ->**Note** ->The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. - -#### Spot instances - -Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. - ->**Note** ->Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. - -#### Node Group Settings - -The following settings are also configurable. All of these except for the "Node Group Name" are editable after the node group is created. - -| Option | Description | -| ------- | ------------ | -| Node Group Name | The name of the node group. | -| Desired ASG Size | The desired number of instances. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Labels | Kubernetes labels applied to the nodes in the managed node group. | -| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. 
| - - -{{% /tab %}} -{{% tab "Rancher v2.5.0-v2.5.5" %}} - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). - -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
- -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version. You can configure whether the AMI has GPU enabled. - -| Option | Description | -| ------- | ------------ | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. 
This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | - -{{% /tab %}} -{{% tab "Rancher before v2.5" %}} - - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Access Key | Enter the access key that you created for your IAM policy. | -| Secret Key | Enter the secret key that you created for your IAM policy. | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you've already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Public IP for Worker Nodes - - - -Your selection for this option determines what options are available for **VPC & Subnet**. - -Option | Description --------|------------ -Yes | When your cluster nodes are provisioned, they're assigned both a private and a public IP address. -No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. - -### VPC & Subnet - - - -The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) - -Option | Description - -------|------------ - Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. - Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - - -If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. - -{{% accordion id="yes" label="Click to expand" %}} - -If you're using **Custom: Choose from your existing VPC and Subnets**: - -(If you're using **Standard**, skip to the [instance options.)](#select-instance-options-2-4) - -1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - -1. Click **Next: Select Security Group**. -{{% /accordion %}} - -If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. -{{% accordion id="no" label="Click to expand" %}} -Follow the steps below. - ->**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. 
- -{{% /accordion %}} - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Instance Options - - - -Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. - -Option | Description --------|------------ -Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. -Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. -Desired ASG Size | The number of instances that your cluster will provision. -User Data | Custom commands can to be passed to perform automated configuration tasks **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ - -{{% /tab %}} -{{% /tabs %}} - - -# Troubleshooting - -If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) - -If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) - -For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). - -# AWS Service Events - -To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). - -# Security and Compliance - -By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not register in Rancher unless the credentials used to register the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). - -For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). 
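As a hedged illustration of the `aws-auth` approach described above, an additional IAM role can be mapped into the cluster by extending the `mapRoles` section of the ConfigMap in the `kube-system` namespace. The ARN, username, and group below are placeholders; follow the linked AWS knowledge center article for the authoritative procedure:

```yaml
# Example aws-auth ConfigMap entry granting cluster access to an additional IAM role (placeholder values)
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: arn:aws:iam::<ACCOUNT_ID>:role/<ADDITIONAL_ROLE>
      username: additional-admin
      groups:
        - system:masters
```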
- -# Tutorial - -This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. - -# Minimum EKS Permissions - -Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. Additional permissions are required for Rancher to provision the `Service Role` and `VPC` resources. Optionally these resources can be created **before** the cluster creation and will be selectable when defining the cluster configuration. - -Resource | Description ----------|------------ -Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#service-role-permissions). -VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#vpc-permissions). - - -Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "EC2Permisssions", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances", - "ec2:RevokeSecurityGroupIngress", - "ec2:RevokeSecurityGroupEgress", - "ec2:DescribeVpcs", - "ec2:DescribeTags", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeRouteTables", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeKeyPairs", - "ec2:DescribeInternetGateways", - "ec2:DescribeImages", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeAccountAttributes", - "ec2:DeleteTags", - "ec2:DeleteSecurityGroup", - "ec2:DeleteKeyPair", - "ec2:CreateTags", - "ec2:CreateSecurityGroup", - "ec2:CreateLaunchTemplateVersion", - "ec2:CreateLaunchTemplate", - "ec2:CreateKeyPair", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:AuthorizeSecurityGroupEgress" - ], - "Resource": "*" - }, - { - "Sid": "CloudFormationPermisssions", - "Effect": "Allow", - "Action": [ - "cloudformation:ListStacks", - "cloudformation:ListStackResources", - "cloudformation:DescribeStacks", - "cloudformation:DescribeStackResources", - "cloudformation:DescribeStackResource", - "cloudformation:DeleteStack", - "cloudformation:CreateStackSet", - "cloudformation:CreateStack" - ], - "Resource": "*" - }, - { - "Sid": "IAMPermissions", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:ListInstanceProfilesForRole", - "iam:ListInstanceProfiles", - "iam:ListAttachedRolePolicies", - "iam:GetRole", - "iam:GetInstanceProfile", - "iam:DetachRolePolicy", - "iam:DeleteRole", - "iam:CreateRole", - "iam:AttachRolePolicy" - ], - "Resource": "*" - }, - { - "Sid": "KMSPermisssions", - "Effect": "Allow", - "Action": "kms:ListKeys", - "Resource": "*" - }, - { - "Sid": "EKSPermisssions", - "Effect": "Allow", - "Action": [ - "eks:UpdateNodegroupVersion", - "eks:UpdateNodegroupConfig", - "eks:UpdateClusterVersion", - "eks:UpdateClusterConfig", - "eks:UntagResource", - "eks:TagResource", - "eks:ListUpdates", - 
"eks:ListTagsForResource", - "eks:ListNodegroups", - "eks:ListFargateProfiles", - "eks:ListClusters", - "eks:DescribeUpdate", - "eks:DescribeNodegroup", - "eks:DescribeFargateProfile", - "eks:DescribeCluster", - "eks:DeleteNodegroup", - "eks:DeleteFargateProfile", - "eks:DeleteCluster", - "eks:CreateNodegroup", - "eks:CreateFargateProfile", - "eks:CreateCluster" - ], - "Resource": "*" - } - ] -} -``` - -### Service Role Permissions - -Rancher will create a service role with the following trust policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "sts:AssumeRole", - "Principal": { - "Service": "eks.amazonaws.com" - }, - "Effect": "Allow", - "Sid": "" - } - ] -} -``` - -This role will also have two role policy attachments with the following policies ARNs: - -``` -arn:aws:iam::aws:policy/AmazonEKSClusterPolicy -arn:aws:iam::aws:policy/AmazonEKSServicePolicy -``` - -Permissions required for Rancher to create service role on users behalf during the EKS cluster creation process. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "IAMPermisssions", - "Effect": "Allow", - "Action": [ - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreateRole", - "iam:CreateServiceLinkedRole", - "iam:DeleteInstanceProfile", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfiles", - "iam:ListInstanceProfilesForRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile" - ], - "Resource": "*" - } - ] -} -``` - -### VPC Permissions - -Permissions required for Rancher to create VPC and associated resources. - -```json -{ - "Sid": "VPCPermissions", - "Effect": "Allow", - "Action": [ - "ec2:ReplaceRoute", - "ec2:ModifyVpcAttribute", - "ec2:ModifySubnetAttribute", - "ec2:DisassociateRouteTable", - "ec2:DetachInternetGateway", - "ec2:DescribeVpcs", - "ec2:DeleteVpc", - "ec2:DeleteTags", - "ec2:DeleteSubnet", - "ec2:DeleteRouteTable", - "ec2:DeleteRoute", - "ec2:DeleteInternetGateway", - "ec2:CreateVpc", - "ec2:CreateSubnet", - "ec2:CreateSecurityGroup", - "ec2:CreateRouteTable", - "ec2:CreateRoute", - "ec2:CreateInternetGateway", - "ec2:AttachInternetGateway", - "ec2:AssociateRouteTable" - ], - "Resource": "*" -} -``` - - -# Syncing - -Syncing is the feature that causes Rancher to update its EKS clusters' values so they are up to date with their corresponding cluster object in the EKS console. This enables Rancher to not be the sole owner of an EKS cluster’s state. Its largest limitation is that processing an update from Rancher and another source at the same time or within 5 minutes of one finishing may cause the state from one source to completely overwrite the other. - -### How it works - -There are two fields on the Rancher Cluster object that must be understood to understand how syncing works: - -1. EKSConfig which is located on the Spec of the Cluster. -2. UpstreamSpec which is located on the EKSStatus field on the Status of the Cluster. - -Both of which are defined by the struct EKSClusterConfigSpec found in the eks-operator project: https://github.com/rancher/eks-operator/blob/master/pkg/apis/eks.cattle.io/v1/types.go - -All fields with the exception of DisplayName, AmazonCredentialSecret, Region, and Imported are nillable on the EKSClusterConfigSpec. - -The EKSConfig represents desired state for its non-nil values. 
Fields that are non-nil in the EKSConfig can be thought of as "managed". When a cluster is created in Rancher, all fields are non-nil and therefore "managed". When a pre-existing cluster is registered in Rancher, all nillable fields are nil and are not "managed". Those fields become managed once their value has been changed by Rancher. - -UpstreamSpec represents the cluster as it is in EKS and is refreshed on an interval of 5 minutes. After the UpstreamSpec has been refreshed, Rancher checks whether the EKS cluster has an update in progress. If it is updating, nothing further is done. If it is not currently updating, any "managed" fields on EKSConfig are overwritten with their corresponding value from the recently updated UpstreamSpec. - -The effective desired state can be thought of as the UpstreamSpec plus all non-nil fields in the EKSConfig. This is what is displayed in the UI. - -If Rancher and another source attempt to update an EKS cluster at the same time, or within the 5-minute refresh window after an update finishes, any "managed" fields can be caught in a race condition. For example, a cluster may have PrivateAccess as a managed field. If PrivateAccess is false, is then enabled in the EKS console in an update that finishes at 11:01, and tags are then updated from Rancher before 11:05, the PrivateAccess value will likely be overwritten. This would also occur if the tags were updated while the cluster was processing the update. If the cluster was registered and the PrivateAccess field was nil, this issue should not occur in the aforementioned case. - -### Configuring the Refresh Interval - -It is possible to change the refresh interval through the setting "eks-refresh-cron". This setting accepts values in the cron format. The default is `*/5 * * * *`. The shorter the refresh window is, the less likely race conditions are to occur, but a shorter window increases the likelihood of encountering request limits that may be in place for AWS APIs. diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md deleted file mode 100644 index 898b8db3c..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Creating a GKE Cluster -shortTitle: Google Kubernetes Engine -weight: 2105 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-gke/ ---- - -## Prerequisites in Google Kubernetes Engine - ->**Note** ->Deploying to GKE will incur charges. - -Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. - -The service account requires the following roles: - -- **Compute Viewer:** `roles/compute.viewer` - - **Project Viewer:** `roles/viewer` - - **Kubernetes Engine Admin:** `roles/container.admin` - - **Service Account User:** `roles/iam.serviceAccountUser` - -[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) - -## Create the GKE Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Google Kubernetes Engine**. - -3. Enter a **Cluster Name**. - -4.
Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Either paste your service account private key in the **Service Account** text box or **Read from a file**. Then click **Next: Configure Nodes**. - - >**Note:** After submitting your private key, you may have to enable the Google Kubernetes Engine API. If prompted, browse to the URL displayed in the Rancher UI to enable the API. - -6. Select your **Cluster Options** -7. Customize your **Node Options** - * Enabling the Auto Upgrade feature for Nodes is not recommended. -8. Select your **Security Options** -9. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md deleted file mode 100644 index fa85750ca..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Creating a Tencent TKE Cluster -shortTitle: Tencent Kubernetes Engine -weight: 2125 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.x/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. - -## Prerequisites in Tencent - ->**Note** ->Deploying to TKE will incur charges. - -1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. - -2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). - -3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region that you want to deploy your Kubernetes cluster. - -4. Create a [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. - -## Create a TKE Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Tencent TKE**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). 
- - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Region | From the drop-down, choose the geographical region in which to build your cluster. | - | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | - | Secret Key | Enter the Secret Key that you obtained from the Tencent Cloud Console. | - -6. Click `Next: Configure Cluster` to set your TKE cluster configurations. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Kubernetes Version | TKE currently supports only Kubernetes version 1.10.5. | - | Node Count | Enter the number of worker nodes you want to purchase for your Kubernetes cluster, up to 100. | - | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | - | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster. You can check the available CIDR ranges in the VPC service of the Tencent Cloud Console. Defaults to 172.16.0.0/16. | - - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -7. Click `Next: Select Instance Type` to choose the instance type that will be used for your TKE cluster. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Availability Zone | Choose the availability zone of the VPC region. | - | Subnet | Select the subnet that you have created within the VPC, or add a new one if none exists in the chosen availability zone. | - | Instance Type | From the drop-down, choose the VM instance type that you want to use for the TKE cluster. Defaults to S2.MEDIUM4 (2 CPU, 4 GiB memory). | - -8. Click `Next: Configure Instance` to configure the VM instances that will be used for your TKE cluster. - - Option | Description - -------|------------ - Operating System | The name of the operating system. Currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64. - Security Group | Security group ID. By default, no security groups are bound. - Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). - Root Disk Size | System disk size. For Linux systems, the adjustment range is 20 - 50 GB, in steps of 1 GB. - Data Disk Type | Data disk type. Defaults to the SSD cloud drive. - Data Disk Size | Data disk size (GB), in steps of 10 GB. - Band Width Type | Type of bandwidth: PayByTraffic or PayByHour. - Band Width | Public network bandwidth (Mbps). - Key Pair | Key ID. After the key is associated, it can be used to log in to the VM node. - -9. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster.
- -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md deleted file mode 100644 index abca96447..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Importing Existing Clusters -description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. Then, you can manage it using Rancher -metaTitle: 'Kubernetes Cluster Management' -metaDescription: 'Learn how you can import an existing Kubernetes cluster and then manage it using Rancher' -weight: 5 -aliases: - - /rancher/v2.x/en/tasks/clusters/import-cluster/ ---- - -_Available as of v2.0.x-v2.4.x_ - -When managing an imported cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. Note that Rancher does not automate the provisioning or scaling of imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -Rancher v2.4 added the capability to import a K3s cluster into Rancher, as well as the ability to upgrade Kubernetes by editing the cluster in the Rancher UI. - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) -- [Features](#features) -- [Prerequisites](#prerequisites) -- [Importing a cluster](#importing-a-cluster) -- [Imported K3s clusters](#imported-k3s-clusters) - - [Additional features for imported K3s clusters](#additional-features-for-imported-k3s-clusters) - - [Configuring a K3s Cluster to Enable Importation to Rancher](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - - [Debug Logging and Troubleshooting for Imported K3s clusters](#debug-logging-and-troubleshooting-for-imported-k3s-clusters) -- [Annotating imported clusters](#annotating-imported-clusters) - -# Changes in Rancher v2.5 - -In Rancher v2.5, the cluster registration feature replaced the feature to import clusters. - -Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. 
- -# Features - -After importing a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and [logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) -- Enable [Istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) -- Use [pipelines]({{}}/rancher/v2.x/en/project-admin/pipelines/) -- Configure [alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) and [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) -- Manage [projects]({{}}/rancher/v2.x/en/project-admin/) and [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) - -After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/) - -# Prerequisites - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. - -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to import the cluster. - -By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -> If you are importing a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - -# Importing a Cluster - -1. From the **Clusters** page, click **Add Cluster**. -2. Choose **Import**. -3. Enter a **Cluster Name**. -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user.} -5. Click **Create**. -6. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. -7. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. -8. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. -9. When you finish running the command(s) on your node, click **Done**. - -**Result:** - -- Your cluster is registered and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. -- You can access your cluster after its state is updated to **Active.** -- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). 
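Before running the command from step 7, it can help to confirm that `kubectl` is pointed at the cluster you intend to import. The registration manifest URL below is only a placeholder for whatever command the Rancher UI actually displays:

```bash
# Verify kubectl is talking to the cluster you want to import
kubectl config current-context
kubectl get nodes

# Then run the command shown in the Rancher UI, which will look roughly like:
kubectl apply -f https://<RANCHER_SERVER>/v3/import/<TOKEN>.yaml
```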
- -> **Note:** -> You cannot re-import a cluster that is currently active in a Rancher setup. - -# Imported K3s Clusters - -You can now import a K3s Kubernetes cluster into Rancher. [K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. - -### Additional Features for Imported K3s Clusters - -_Available as of v2.4.0_ - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. - -### Configuring K3s Cluster Upgrades - -> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. - -The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If the number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. - -- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum number of unavailable server nodes -- **Worker concurrency:** The maximum number of worker nodes to upgrade at the same time; also the maximum number of unavailable worker nodes - -In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes can have workloads scheduled to them by default. - -Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. - -### Configuring a K3s Cluster to Enable Importation to Rancher - -The K3s server needs to be configured to allow writing to the kubeconfig file. - -This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: - -``` -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` - -The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: - -``` -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -### Debug Logging and Troubleshooting for Imported K3s Clusters - -Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. - -To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod.
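A minimal sketch of that sequence, assuming the controller is deployed in the `cattle-system` namespace and uses the upstream `SYSTEM_UPGRADE_CONTROLLER_DEBUG` environment variable exposed through its ConfigMap (the ConfigMap and deployment names can differ between releases, so check the linked manifest first):

```bash
# Edit the configmap referenced above and set the debug variable to "true"
# (the name "default-controller-env" is the upstream default and may differ in your cluster)
kubectl -n cattle-system edit configmap default-controller-env

# Restart the controller so it picks up the new value
kubectl -n cattle-system rollout restart deployment/system-upgrade-controller
```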
- -Logs created by the `system-upgrade-controller` can be viewed by running this command: - -``` -kubectl logs -n cattle-system system-upgrade-controller -``` - -The current status of the plans can be viewed with this command: - -``` -kubectl get plans -A -o yaml -``` - -If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. - -To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. - -# Annotating Imported Clusters - -For all types of imported Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. - -Therefore, when Rancher imports a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the imported cluster. - -However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. - -By annotating an imported cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. - -This example annotation indicates that a pod security policy is enabled: - -``` -"capabilities.cattle.io/pspEnabled": "true" -``` - -The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. - -``` -"capabilities.cattle.io/ingressCapabilities": "[ - { - "customDefaultBackend":true, - "ingressProvider":"asdf" - } -]" -``` - -These capabilities can be annotated for the cluster: - -- `ingressCapabilities` -- `loadBalancerCapabilities` -- `nodePoolScalingSupported` -- `nodePortRange` -- `pspEnabled` -- `taintSupport` - -All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. - -To annotate an imported cluster, - -1. Go to the cluster view in Rancher and select **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. -1. Click **Save.** - -**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md deleted file mode 100644 index 62b501dae..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Node Requirements for Rancher Managed Clusters -weight: 1 ---- - -This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the cluster (or single node) running Rancher. 
- -> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.x/en/installation/requirements/) - -Make sure the nodes for the Rancher server fulfill the following requirements: - -- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) -- [Networking Requirements](#networking-requirements) -- [Optional: Security Considerations](#optional-security-considerations) - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) The capability to use Windows worker nodes in downstream clusters was added in Rancher v2.3.0. - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/) - -For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) - -### Oracle Linux and RHEL Derived Linux Nodes - -Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - -### SUSE Linux Nodes - -SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.x/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. - -### Flatcar Container Linux Nodes - -When [Launching Kubernetes with Rancher]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) - -{{% tabs %}} -{{% tab "Canal"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: canal - options: - canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} - -{{% tab "Calico"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: calico - options: - calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} -{{% /tabs %}} - -It is also required to enable the Docker service, you can enable the Docker service using the following command: - -``` -systemctl enable docker.service -``` - -The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/#node-drivers). 
- -### Windows Nodes - -_Windows worker nodes can be used as of Rancher v2.3.0_ - -Nodes with Windows Server must run Docker Enterprise Edition. - -Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) - -# Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) be hosted on different nodes so that they can scale separately from each other. - -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) - -# Networking Requirements - -For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. - -The ports required to be open are different depending on how the user cluster is launched. Each of the sections below lists the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.x/en/cluster-provisioning/). - -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) - -Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.x/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). - -# Optional: Security Considerations - -If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend following our hardening guide to configure your nodes before installing Kubernetes. - -For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md deleted file mode 100644 index 2da635a79..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Checklist for Production-Ready Clusters -weight: 2 ---- - -In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. - -For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) - -This is a shortlist of best practices that we strongly recommend for all production clusters.
- -For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices) - -### Node Requirements - -* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) including the port requirements. - -### Back up etcd - -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. - -### Cluster Architecture - -* Nodes should have one of the following role configurations: - * `etcd` - * `controlplane` - * `etcd` and `controlplane` - * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -* Assign two or more nodes the `controlplane` role for master component high availability. -* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) - -For more information about the -number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/) - -### Logging and Monitoring - -* Configure alerts/notifiers for Kubernetes components (System Service). -* Configure logging for cluster analysis and post-mortems. - -### Reliability - -* Perform load tests on your cluster to verify that its hardware can support your workloads. - -### Networking - -* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). -* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). 
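As a quick sanity check for the etcd timing settings mentioned above, you can inspect the flags that the etcd container was actually started with; a minimal sketch, assuming shell access to an etcd node and RKE's default container name `etcd` (if the flags are not listed, etcd is running with its built-in defaults):

```
# list the arguments etcd was launched with and filter for the tuning flags
docker inspect etcd --format '{{join .Args " "}}' | tr ' ' '\n' | grep -E 'heartbeat-interval|election-timeout'
```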
diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md deleted file mode 100644 index 3722a97e4..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Roles for Nodes in Kubernetes -weight: 1 ---- - -This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. - -This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -# etcd - -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. - ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -# controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. - ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -# worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -# References - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md deleted file mode 100644 index b075f9c67..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Recommended Cluster Architecture -weight: 1 ---- - -There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. - -# Separating Worker Nodes from Nodes with Other Roles - -When designing your cluster(s), you have two options: - -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements). -* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. 
- -In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. - -Therefore, each node should have one of the following role configurations: - - * `etcd` - * `controlplane` - * Both `etcd` and `controlplane` - * `worker` - -# Recommended Number of Nodes with Each Role - -The cluster should have: - -- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -- At least two nodes with the role `controlplane` for master component high availability. -- At least two nodes with the role `worker` for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) - - -### Number of Controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. - -### Number of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. - -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) -* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Number of Worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications - -You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. - -# References - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/content/rancher/v2.x/en/cluster-provisioning/registered-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/registered-clusters/_index.md deleted file mode 100644 index 539f5d455..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/registered-clusters/_index.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -title: Registering Existing Clusters -weight: 6 ---- - -_Available as of v2.5_ - -The cluster registration feature replaced the feature to import clusters. 
- -The control that Rancher has to manage a registered cluster depends on the type of cluster. For details, see [Management Capabilities for Registered Clusters.](#management-capabilities-for-registered-clusters) - -Registering EKS clusters now provides additional benefits. - -- [Prerequisites](#prerequisites) -- [Registering a Cluster](#registering-a-cluster) -- [Management Capabilities for Registered Clusters](#management-capabilities-for-registered-clusters) - - [Features for All Registered Clusters](#features-for-all-registered-clusters) - - [Additional Features for Registered K3s Clusters](#additional-features-for-registered-k3s-clusters) - - [Additional Features for Registered EKS Clusters](#additional-features-for-registered-eks-clusters) -- [Configuring K3s Cluster Upgrades](#configuring-k3s-cluster-upgrades) -- [Debug Logging and Troubleshooting for Registered K3s Clusters](#debug-logging-and-troubleshooting-for-registered-k3s-clusters) -- [Annotating Registered Clusters](#annotating-registered-clusters) - -# Prerequisites - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. - -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to register the cluster. - -By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) - -# Registering a Cluster - -1. From the **Clusters** page, click **Add Cluster**. -2. Choose **Register**. -3. Enter a **Cluster Name**. -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -5. Use **Agent Environment Variables** under **Cluster Options** to set environment variables for [rancher cluster agent]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. -6. Click **Create**. -7. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. -8. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. -9. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. 
-10. When you finish running the command(s) on your node, click **Done**. - -**Result:** - -- Your cluster is registered and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. -- You can access your cluster after its state is updated to **Active.** -- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). - - -> **Note:** -> You can not re-register a cluster that is currently active in a Rancher setup. - -### Configuring a K3s Cluster to Enable Registration in Rancher - -The K3s server needs to be configured to allow writing to the kubeconfig file. - -This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: - -``` -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` - -The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: - -``` -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -# Management Capabilities for Registered Clusters - -The control that Rancher has to manage a registered cluster depends on the type of cluster. - -### Features for All Registered Clusters - -After registering a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring, alerts and notifiers]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/) -- Enable [logging]({{}}/rancher/v2.x/en/logging/v2.5/) -- Enable [Istio]({{}}/rancher/v2.x/en/istio/v2.5/) -- Use [pipelines]({{}}/rancher/v2.x/en/project-admin/pipelines/) -- Manage projects and workloads - -### Additional Features for Registered K3s Clusters - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. - -When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: - -- The ability to [upgrade the K3s version]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/) -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster - -### Additional Features for Registered EKS Clusters - -Registering an Amazon EKS cluster allows Rancher to treat it as though it were created in Rancher. - -Amazon EKS clusters can now be registered in Rancher. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. - -The capabilities for registered EKS clusters are listed in the table on [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/) - -# Configuring K3s Cluster Upgrades - -> It is a Kubernetes best practice to back up the cluster before upgrading. 
When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. - -The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. - -- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes -- **Worker concurrency:** The maximum number worker nodes to upgrade at the same time; also the maximum unavailable worker nodes - -In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. - -Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. - -# Debug Logging and Troubleshooting for Registered K3s Clusters - -Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. - -To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. - -Logs created by the `system-upgrade-controller` can be viewed by running this command: - -``` -kubectl logs -n cattle-system system-upgrade-controller -``` - -The current status of the plans can be viewed with this command: - -``` -kubectl get plans -A -o yaml -``` - -If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. - -To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. - - - - -# Annotating Registered Clusters - -For all types of registered Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. - -Therefore, when Rancher registers a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the registered cluster. - -However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. - -By annotating a registered cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. 
- -This example annotation indicates that a pod security policy is enabled: - -``` -"capabilities.cattle.io/pspEnabled": "true" -``` - -The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. - -``` -"capabilities.cattle.io/ingressCapabilities": "[ - { - "customDefaultBackend":true, - "ingressProvider":"asdf" - } -]" -``` - -These capabilities can be annotated for the cluster: - -- `ingressCapabilities` -- `loadBalancerCapabilities` -- `nodePoolScalingSupported` -- `nodePortRange` -- `pspEnabled` -- `taintSupport` - -All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. - -To annotate a registered cluster, - -1. Go to the cluster view in Rancher and select **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. -1. Click **Save.** - -**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. - diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md deleted file mode 100644 index d118db75b..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Launching Kubernetes with Rancher -weight: 4 ---- - -You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - -- Bare-metal servers -- On-premise virtual machines -- Virtual machines hosted by an infrastructure provider - -Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. - -RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. - -### Requirements - -If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. - -### Launching Kubernetes on New Nodes in an Infrastructure Provider - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. 
- -For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md deleted file mode 100644 index 3a31a400a..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Setting up Cloud Providers -weight: 2300 -aliases: - - /rancher/v2.x/en/concepts/clusters/cloud-providers/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers ---- -A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. - -Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. - -By default, the **Cloud Provider** option is set to `None`. - -The following cloud providers can be enabled: - -* Amazon -* Azure -* GCE (Google Compute Engine) -* vSphere - -### Setting up the Amazon Cloud Provider - -For details on enabling the Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon) - -### Setting up the Azure Cloud Provider - -For details on enabling the Azure cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure) - -### Setting up the GCE Cloud Provider - -For details on enabling the Google Compute Engine cloud provider, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce) - -### Setting up the vSphere Cloud Provider - -For details on enabling the vSphere cloud provider, refer to [this page.](./vsphere) - -### Setting up a Custom Cloud Provider - -The `Custom` cloud provider is available if you want to configure any [Kubernetes cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). - -For the custom cloud provider option, you can refer to the [RKE docs]({{}}/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. 
There are specific cloud providers that have more detailed configuration : - -* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/) -* [OpenStack]({{}}/rke/latest/en/config-options/cloud-providers/openstack/) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md deleted file mode 100644 index e855934b9..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Setting up the Amazon Cloud Provider -weight: 1 ---- - -When using the `Amazon` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. -- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. - -See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider. - -To set up the Amazon cloud provider, - -1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) -2. [Configure the ClusterID](#2-configure-the-clusterid) - -### 1. Create an IAM Role and attach to the instances - -All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: - -* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. -* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. - -While creating an [Amazon EC2 cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. - -While creating a [Custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes), you must manually attach the IAM role to the instance(s). 
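If you prefer the AWS CLI over the console for this step, the attachment can be done with a single command; a sketch in which the instance ID and the instance profile name (`rancher-controlplane-profile`) are placeholders for your own values:

```
aws ec2 associate-iam-instance-profile \
  --instance-id i-0123456789abcdef0 \
  --iam-instance-profile Name=rancher-controlplane-profile
```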
- -IAM Policy for nodes with the `controlplane` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] -} -``` - -IAM policy for nodes with the `etcd` or `worker` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } -] -} -``` - -### 2. Configure the ClusterID - -The following resources need to tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster. -- **Security Group**: The security group used for your cluster. - ->**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). - -When you create an [Amazon EC2 Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. 
- -Use the following tag: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` - -`CLUSTERID` can be any string you like, as long as it is equal across all tags set. - -Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. - -### Using Amazon Elastic Container Registry (ECR) - -The kubelet component has the ability to automatically obtain ECR credentials when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs to be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md deleted file mode 100644 index 258845725..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Setting up the Azure Cloud Provider -weight: 2 ---- - -When using the `Azure` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. - -- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. - -- **Network Storage:** Supports Azure Files via CIFS mounts. - -The following account types are not supported for Azure Subscriptions: - -- Single tenant accounts (i.e. accounts with no subscriptions). -- Multi-subscription accounts. - -To set up the Azure cloud provider, the following credentials need to be configured: - -1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) -2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) -3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) -4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) - -### 1. Set up the Azure Tenant ID - -Visit [Azure portal](https://portal.azure.com), log in and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). - -If you want to use the Azure CLI, you can run the command `az account show` to get the information. - -### 2. Set up the Azure Client ID and Azure Client Secret - -Visit [Azure portal](https://portal.azure.com), log in and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). - -1. Select **Azure Active Directory**. -1. Select **App registrations**. -1. Select **New application registration**. -1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. -1. Select **Create**. - -In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**.
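If you prefer the Azure CLI over the portal, a service principal with the same values can be created in one step; a sketch in which the name and subscription ID are placeholders, and the `appId`, `password`, and `tenant` fields of the output correspond to the Client ID, Client Secret, and Tenant ID (assigning the `Contributor` role here also covers the permission step described below):

```
az ad sp create-for-rbac --name rancher-cloud-provider \
  --role Contributor \
  --scopes /subscriptions/<subscription-id>
```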
- -The next step is to generate the **Azure Client Secret**: - -1. Open your created App registration. -1. In the **Settings** view, open **Keys**. -1. Enter a **Key description**, select an expiration time and select **Save**. -1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. - -### 3. Configure App Registration Permissions - -The last thing you will need to do, is assign the appropriate permissions to your App registration. - -1. Go to **More services**, search for **Subscriptions** and open it. -1. Open **Access control (IAM)**. -1. Select **Add**. -1. For **Role**, select `Contributor`. -1. For **Select**, select your created App registration name. -1. Select **Save**. - -### 4. Set up Azure Network Security Group Name - -A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. - -If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. - -You should already assign custom hosts to this Network Security Group during provisioning. - -Only hosts expected to be load balancer back ends need to be in this group. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md deleted file mode 100644 index 000b537c1..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Setting up the Google Compute Engine Cloud Provider -weight: 3 ---- - -In this section, you'll learn how to enable the Google Compute Engine (GCE) cloud provider for custom clusters in Rancher. A custom cluster is one in which Rancher installs Kubernetes on existing nodes. - -The official Kubernetes documentation for the GCE cloud provider is [here.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#gce) - -> **Prerequisites:** The service account of `Identity and API` access on GCE needs the `Computer Admin` permission. - -If you are using Calico, - -1. Go to the cluster view in the Rancher UI, and click **⋮ > Edit.** -1. Click **Edit as YAML,** and enter the following configuration: - - ``` - rancher_kubernetes_engine_config: - cloud_provider: - name: gce - customCloudProvider: |- - [Global] - project-id= - network-name= - subnetwork-name= - node-instance-prefix= - node-tags= - network: - options: - calico_cloud_provider: "gce" - plugin: "calico" - ``` - -If you are using Canal or Flannel, - -1. Go to the cluster view in the Rancher UI, and click **⋮ > Edit.** -1. 
Click **Edit as YAML,** and enter the following configuration: - - ``` - rancher_kubernetes_engine_config: - cloud_provider: - name: gce - customCloudProvider: |- - [Global] - project-id= - network-name= - subnetwork-name= - node-instance-prefix= - node-tags= - services: - kube_controller: - extra_args: - configure-cloud-routes: true # we need to allow the cloud provider configure the routes for the hosts - ``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md deleted file mode 100644 index 9e999ad08..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Setting up the vSphere Cloud Provider -weight: 4 ---- -In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. - -# In-tree Cloud Provider - -To use the in-tree vSphere cloud provider, you will need to use an RKE configuration option. For details, refer to [this page.](./in-tree) - -# Out-of-tree Cloud Provider - -_Available as of v2.5+_ - -To set up the out-of-tree vSphere cloud provider, you will need to install Helm charts from the Rancher marketplace. For details, refer to [this page.](./out-of-tree) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md deleted file mode 100644 index d4a19ef8b..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: How to Configure In-tree vSphere Cloud Provider -shortTitle: In-tree Cloud Provider -weight: 10 ---- - -To set up the in-tree vSphere cloud provider, follow these steps while creating the vSphere cluster in Rancher: - -1. Set **Cloud Provider** option to `Custom` or `Custom (In-Tree)`. - - {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} - -1. Click on **Edit as YAML** -1. Insert the following structure to the pre-populated cluster YAML. This structure must be placed under `rancher_kubernetes_engine_config`. Note that the `name` *must* be set to `vsphere`. - - ```yaml - rancher_kubernetes_engine_config: - cloud_provider: - name: vsphere - vsphereCloudProvider: - [Insert provider configuration] - ``` - -Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md deleted file mode 100644 index b2e11e896..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: How to Configure Out-of-tree vSphere Cloud Provider -shortTitle: Out-of-tree Cloud Provider -weight: 10 ---- -_Available as of v2.5+_ - -Kubernetes is moving away from maintaining cloud providers in-tree. 
vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. - -This page covers how to install the Cloud Provider Interface (CPI) and Cloud Storage Interface (CSI) plugins after bringing up a cluster. - -# Prerequisites - -The vSphere version must be 7.0u1 or higher. - -The Kubernetes version must be 1.19 or higher. - -Using the vSphere out-of-tree cloud provider requires Linux nodes and is not supported on Windows. - -# Installation - -The Cloud Provider Interface (CPI) should be installed first before installing the Cloud Storage Interface (CSI). - -### 1. Create a vSphere cluster - -1. On the Clusters page, click on **Add Cluster** and select the **vSphere** option or **Existing Nodes** option. -1. Under **Cluster Options > Cloud Provider** select **External (Out-of-tree)**. This sets the cloud provider option on the Kubernetes cluster to `external` which sets your Kubernetes cluster up to be configured with an out-of-tree cloud provider. -1. Finish creating your cluster. - -### 2. Install the CPI plugin - -1. From the **Cluster Explorer** view, go to the top left dropdown menu and click **Apps & Marketplace.** -1. Select the **vSphere CPI** chart. Fill out the required vCenter details. -1. vSphere CPI initializes all nodes with ProviderID which is needed by the vSphere CSI driver. Check if all nodes are initialized with the ProviderID before installing CSI driver with the following command: - - ``` - kubectl describe nodes | grep "ProviderID" - ``` - -### 3. Installing the CSI plugin - -1. From the **Cluster Explorer** view, go to the top left dropdown menu and click **Apps & Marketplace.** -2. Select the **vSphere CSI** chart. Fill out the required vCenter details. -3. Set **Enable CSI Migration** to **false**. -4. This chart creates a StorageClass with the `csi.vsphere.vmware.com` as the provisioner. Fill out the details for the StorageClass and launch the chart. - -# Using the CSI driver for provisioning volumes -The CSI chart by default creates a storageClass. - -If that option was not selected while launching the chart, create a storageClass with the `csi.vsphere.vmware.com` as the provisioner. - -All volumes provisioned using this StorageClass will get provisioned by the CSI driver. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md deleted file mode 100644 index 8694af372..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Migrating vSphere In-tree Volumes to CSI -weight: 5 ---- -_Available as of v2.5+_ - -Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. - -This page covers how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration. -It follows the steps provided in the official [vSphere migration documentation](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html) and provides the steps to be performed in Rancher. 
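Before starting the migration, it can help to confirm which provisioner created the existing PersistentVolumes; a sketch using the standard `pv.kubernetes.io/provisioned-by` annotation (volumes created by the in-tree provider typically report `kubernetes.io/vsphere-volume` here):

```
kubectl get pv -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.annotations.pv\.kubernetes\.io/provisioned-by}{"\n"}{end}'
```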
-### Cloud-config Format Limitation -Existing volumes that were provisioned using the following cloud-config format will NOT get migrated due to an existing bug in vsphere CSI. -If the cloud-config has this format for datastore and resource pool path, vsphere CSI driver cannot recognize it: -```yaml -default-datastore: /datastore/ -resourcepool-path: "/host//Resources/" -``` -Volumes provisioned with the in-tree provider using the following format will get migrated correctly: -```yaml -default-datastore: -resourcepool-path: "/Resources/" -``` -Upstream bug: https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/628 -Rancher issue tracking this bug: https://github.com/rancher/rancher/issues/31105 -# Prerequisites -- vSphere CSI Migration requires vSphere 7.0u1. In order to be able to manage existing in-tree vSphere volumes, upgrade vSphere to 7.0u1. -- The Kubernetes version must be 1.19 or higher. -# Migration -### 1. Install the CPI plugin -Before installing CPI, we need to taint all nodes with `node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule`. -This can be done by running the following commands: -``` -curl -O https://raw.githubusercontent.com/rancher/helm3-charts/56b622f519728378abeddfe95074f1b87ab73b1e/charts/vsphere-cpi/taints.sh -``` -Or: -``` -wget https://raw.githubusercontent.com/rancher/helm3-charts/56b622f519728378abeddfe95074f1b87ab73b1e/charts/vsphere-cpi/taints.sh -chmod +x taints.sh -./taints.sh -``` - -Once all nodes are tainted by the running the script, launch the Helm vSphere CPI chart. - -1. From the **Cluster Explorer** view, go to the top left dropdown menu and click **Apps & Marketplace.** -2. Select the **vSphere CPI** chart. -3. Fill out the required vCenter details and click **Launch**. - -vSphere CPI initializes all nodes with ProviderID, which is needed by the vSphere CSI driver. -Check if all nodes are initialized with the ProviderID with the following command: -``` -kubectl describe nodes | grep "ProviderID" -``` - -### 2. Install the CSI driver - -1. From the **Cluster Explorer** view, go to the top left dropdown menu and click **Apps & Marketplace.** -1. Select the **vSphere CSI** chart. -1. Fill out the required vCenter details and click **Launch**. -1. Set **Enable CSI Migration** to **true**. -1. This chart creates a StorageClass with the `csi.vsphere.vmware.com` as the provisioner. You can provide the URL of the datastore to be used for CSI volume provisioning while creating this StorageClass. The datastore URL can be found in the vSphere client by selecting the datastore and going to the Summary tab. Fill out the details for the StorageClass and click **Launch**. -### 3. Edit the cluster to enable CSI migration feature flags -1. While editing the cluster, if the Kubernetes version is less than 1.19, select Kubernetes version 1.19 or higher from the **Kubernetes Version** dropdown. -2. For enabling feature flags, click on "Edit as YAML", and add the following under kube-controller and kubelet: - ```yaml - extra_args: - feature-gates: "CSIMigration=true,CSIMigrationvSphere=true" - ``` -### 4. Drain worker nodes -Worker nodes must be drained during the upgrade before changing the kubelet and kube-controller-manager args. -1. Click **Edit as Form** and then click on "Advanced Options." -1. Set the field **Maximum Worker Nodes Unavailable** to count of 1. -1. To drain the nodes during upgrade, select **Drain Nodes > Yes**. -1. Set **Force** and **Delete Local Data** to **true**. -1. Click **Save** to upgrade the cluster. 
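After the cluster finishes upgrading, one way to confirm that the CSI driver has registered on every node is to list the CSINode objects; a minimal sketch, where each node is expected to report `csi.vsphere.vmware.com` among its drivers:

```
kubectl get csinodes -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.drivers[*].name}{"\n"}{end}'
```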
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md deleted file mode 100644 index b9673d8e9..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Launching Kubernetes on Existing Custom Nodes -description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements -metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" -weight: 2225 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ - - /rancher/v2.x/en/cluster-provisioning/custom-clusters/ ---- - -When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. - -To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements), which includes some hardware specifications and Docker. After you install Docker on each server, you willl also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. - -This section describes how to set up a custom cluster. - -# Creating a Cluster with Custom Nodes - ->**Want to use Windows hosts as Kubernetes workers?** -> ->See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. - - - -- [1. Provision a Linux Host](#1-provision-a-linux-host) -- [2. Create the Custom Cluster](#2-create-the-custom-cluster) -- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) - - - -### 1. Provision a Linux Host - -Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-prem VM -- A bare-metal server - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -Provision the host according to the [installation requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/production) - -### 2. Create the Custom Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Custom**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** - - >**Using Windows nodes as Kubernetes workers?** - > - >- See [Enable the Windows Support Option]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). 
>- The only Network Provider available for clusters with Windows support is Flannel. -6. Click **Next**. - -7. From **Node Role**, choose the roles that you want filled by a cluster node. - - >**Notes:** - > - >- Using Windows nodes as Kubernetes workers? See [this section]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). - >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). - -8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.x/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTY or a remote Terminal connection. Run the command copied to your clipboard. - - >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. - -11. When you finish running the command(s) on your Linux host(s), click **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -### 3. Amazon Only: Tag Resources - -If you have configured your cluster to use Amazon as the **Cloud Provider**, tag your AWS resources with a cluster ID. - -[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) - ->**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). - - -The following resources need to be tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster. -- **Security Group**: The security group used for your cluster. - - >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer. - -The tag that should be used is: - -``` -Key=kubernetes.io/cluster/CLUSTERID, Value=owned -``` - -`CLUSTERID` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `CLUSTERID` are owned and managed by this cluster. - -If you share resources between clusters, you can change the tag to: - -``` -Key=kubernetes.io/cluster/CLUSTERID, Value=shared -``` - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI.
As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md deleted file mode 100644 index efcae9b49..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Rancher Agent Options -weight: 2500 -aliases: - - /rancher/v2.x/en/admin-settings/agent-options/ - - /rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options ---- - -Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. - -For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#3-node-agents) - -## General options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | -| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | -| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | -| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | -| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | -| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. 
(`--taints key=value:effect`) | - -## Role options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | -| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | -| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | -| `--worker` | `WORKER=true` | Apply the role `worker` to the node | - -## IP address options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | -| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | - -### Dynamic IP address options - -For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. - -| Value | Example | Description | -| ---------- | -------------------- | ----------- | -| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | -| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | -| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | -| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | -| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | -| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | -| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | -| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | -| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | -| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | -| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | -| `packetpublic` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md deleted file mode 100644 index 906ddc886..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Launching Kubernetes on New Nodes in an Infrastructure Provider -weight: 2205 -aliases: - - 
/rancher/v2.x/en/concepts/global-configuration/node-templates/ ---- - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). - -This section covers the following topics: - -- [Node templates](#node-templates) - - [Node labels](#node-labels) - - [Node taints](#node-taints) - - [Administrator control of node templates](#administrator-control-of-node-templates) -- [Node pools](#node-pools) - - [Node pool taints](#node-pool-taints) - - [About node auto-replace](#about-node-auto-replace) - - [Enabling node auto-replace](#enabling-node-auto-replace) - - [Disabling node auto-replace](#disabling-node-auto-replace) -- [Cloud credentials](#cloud-credentials) -- [Node drivers](#node-drivers) - -# Node Templates - -A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. - -After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. - -### Node Labels - -You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. - -### Node Taints - -_Available as of Rancher v2.3.0_ - -You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. - -Since taints can be added at a node template and node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### Administrator Control of Node Templates - -_Available as of v2.3.3_ - -Administrators can control all node templates. Admins can now maintain all the node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained. - -To access all node templates, an administrator will need to do the following: - -1. In the Rancher UI, click the user profile icon in the upper right corner. -1. Click **Node Templates.** - -**Result:** All node templates are listed and grouped by owner. 
The templates can be edited or cloned by clicking the **⋮.** - -# Node Pools - -Using Rancher, you can create pools of nodes based on a [node template](#node-templates). - -A node template defines the configuration of a node, such as the operating system, the number of CPUs, and the amount of memory. - -The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. - -Each node pool is assigned a [node component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) to specify how these nodes should be configured for the Kubernetes cluster. - -Each node pool must have one or more node roles assigned. - -Each node role (i.e. etcd, control plane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters. - -The recommended setup is to have: - -- a node pool with the etcd node role and a count of three -- a node pool with the control plane node role and a count of at least two -- a node pool with the worker node role and a count of at least two - -### Node Pool Taints - -_Available as of Rancher v2.3.0_ - -If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than the node template level is that you can swap out node templates without worrying about whether the taint is defined on each template. - -Each taint will automatically be added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to the existing nodes in the node pool, but any new node added into the node pool will get the taint. - -When there are taints on both the node pool and the node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effects, the taints from the node pool will override the taints from the node template. - -### About Node Auto-replace - -_Available as of Rancher v2.3.0_ - -If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. - -> **Important:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object.
Rancher will then provision a node in accordance with the set quantity of the node pool. - -### Enabling Node Auto-replace - -When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. - -1. In the form for creating a cluster, go to the **Node Pools** section. -1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Fill out the rest of the form for creating a cluster. - -**Result:** Node auto-replace is enabled for the node pool. - -You can also enable node auto-replace after the cluster is created with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Click **Save.** - -**Result:** Node auto-replace is enabled for the node pool. - -### Disabling Node Auto-replace - -You can disable node auto-replace from the Rancher UI with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to disable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to disable node auto-replace. In the **Recreate Unreachable After** field, enter 0. -1. Click **Save.** - -**Result:** Node auto-replace is disabled for the node pool. - -# Cloud Credentials - -_Available as of v2.2.0_ - -Node templates can use cloud credentials to store the credentials needed for launching nodes in your cloud provider, which has some benefits: - -- Credentials are stored as a Kubernetes secret, which is not only more secure, but also allows you to edit a node template without having to enter your credentials every time. - -- After the cloud credential is created, it can be re-used to create additional node templates. - -- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. - -> **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver that has fields marked as `password` are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/). - -# Node Drivers - -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers).
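The node auto-replace behavior described above hinges on the unreachable taint that the Kubernetes node controller places on a node that stops reporting status. As a rough way to see which nodes Rancher's deletion countdown would apply to, you can list the nodes that currently carry that taint. This is an illustrative sketch, not taken from the Rancher docs; it only assumes standard kubectl JSONPath support.

```bash
# Show each node together with the effect of the node.kubernetes.io/unreachable taint, if present
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints[?(@.key=="node.kubernetes.io/unreachable")].effect}{"\n"}{end}'
```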
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md deleted file mode 100644 index 251393573..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Creating an Azure Cluster -shortTitle: Azure -weight: 2220 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ ---- - -In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. - -First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. - -Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) - -For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) - -- [Preparation in Azure](#preparation-in-azure) -- [Creating an Azure Cluster](#creating-an-azure-cluster) - -# Preparation in Azure - -Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. - -To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. - -The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: - -``` -az ad sp create-for-rbac \ - --name="" \ - --role="Contributor" \ - --scopes="/subscriptions/" -``` - -The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, *The client secret*, and *The tenant ID*. This information will be used when you create a node template for Azure. - -# Creating an Azure Cluster - -{{%tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Azure**. -1. Enter your Azure credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. 
Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in Azure. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Azure**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -Use Rancher to create a Kubernetes cluster in Azure. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Azure**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Azure Options** form. For help filling out the form, refer to the [Azure node template configuration reference.](./azure-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. 
- -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md deleted file mode 100644 index 1c2db8c79..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Azure Node Template Configuration -weight: 1 ---- - -For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. - -- **Placement** sets the geographical region where your cluster is hosted and other location metadata. -- **Network** configures the networking used in your cluster. -- **Instance** customizes your VM configuration. - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -- **Account Access** stores your account information for authenticating with Azure. -- **Placement** sets the geographical region where your cluster is hosted and other location metadata. 
-- **Network** configures the networking used in your cluster. -- **Instance** customizes your VM configuration. - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md deleted file mode 100644 index 76aacc91d..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Creating a DigitalOcean Cluster -shortTitle: DigitalOcean -weight: 2215 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. - -First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. - -Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **DigitalOcean**. -1. Enter your Digital Ocean credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) - -### 3. 
Create a cluster with node pools using the node template - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Digital Ocean Options** form. For help filling out the form, refer to the [Digital Ocean node template configuration reference.](./do-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -{{% /tab %}} -{{% /tabs %}} - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI.
As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md deleted file mode 100644 index 4d9a0066f..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: DigitalOcean Node Template Configuration -weight: 1 ----- - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. - -### Droplet Options - -The **Droplet Options** provision your cluster's geographical region and specifications. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -### Access Token - -The **Access Token** stores your DigitalOcean Personal Access Token. Refer to [DigitalOcean Instructions: How To Generate a Personal Access Token](https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2#how-to-generate-a-personal-access-token). - -### Droplet Options - -The **Droplet Options** provision your cluster's geographical region and specifications. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
-- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md deleted file mode 100644 index f6efca2bc..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Creating an Amazon EC2 Cluster -shortTitle: Amazon EC2 -description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher -weight: 2210 ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. - -First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. - -Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -### Prerequisites - -- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) -- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. - -> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. - -# Creating an EC2 Cluster - -The steps to create a cluster differ based on your Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. 
In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Amazon.** -1. In the **Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key.** -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials and information from EC2 - -Creating a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Amazon EC2**. -1. Enter a **Cluster Name**. -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) -1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Amazon EC2**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
To see more cluster options, click on **Show advanced options.** Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) To create a node template, click **Add Node Template**. For help filling out the node template, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) -1. Click **Create**. -1. **Optional:** Add additional node pools. -1. Review your cluster settings to confirm they are correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# IAM Policies - -> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. 
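If you manage IAM from the command line rather than the console, the example policies below can also be created and attached with the AWS CLI. The following is a hedged sketch, not part of the original instructions; the policy name, local file name, and user name are placeholders you would replace with your own values.

```bash
# Create an IAM policy from one of the example JSON documents below, saved locally as rancher-ec2-policy.json
aws iam create-policy \
  --policy-name RancherEC2NodeDriver \
  --policy-document file://rancher-ec2-policy.json

# Attach the policy to the IAM user whose Access Key and Secret Key you will enter in Rancher
aws iam attach-user-policy \
  --user-name rancher-node-driver \
  --policy-arn arn:aws:iam::AWS_ACCOUNT_ID:policy/RancherEC2NodeDriver
```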
- -### Example IAM Policy - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` - -### Example IAM Policy with PassRole - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", - "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` -### Example IAM Policy to allow encrypted EBS volumes -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "kms:Decrypt", - "kms:GenerateDataKeyWithoutPlaintext", - "kms:Encrypt", - "kms:DescribeKey", - "kms:CreateGrant", - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:DeleteSnapshot", - "ec2:DeleteTags", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:CreateSnapshot" - ], - "Resource": [ - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", - "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" - ] - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md deleted file mode 100644 index 9b8089cbf..000000000 --- 
a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: EC2 Node Template Configuration -weight: 1 ---- - -For more details about EC2, nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -### Region - -In the **Region** field, select the same region that you used when creating your cloud credentials. - -### Cloud Credentials - -Your AWS account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) - -See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. - -See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. - -See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM - -See our three example JSON policies: - -- [Example IAM Policy]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. - -### Authenticate & Configure Nodes - -Choose an availability zone and network settings for your cluster. - -### Security Group - -Choose the default security group or configure a security group. - -Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. - -### Instance Options - -Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. - -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -### Engine Options - -In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -### Account Access - -**Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. - -See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. 
- -See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) for how to create an IAM policy. - -See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) for how to attach an IAM policy to a user. - -See our three example JSON policies: - -- [Example IAM Policy]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) - -### Zone and Network - -**Zone and Network** configures the availability zone and network settings for your cluster. - -### Security Groups - -**Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. - -### Instance - -**Instance** configures the instances that will be created. - -### SSH User - -Make sure you configure the correct **SSH User** for the configured AMI. - -### IAM Instance Profile Name - -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon. -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md deleted file mode 100644 index 6db13f971..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Creating a vSphere Cluster -shortTitle: vSphere -description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. 
-weight: 2225 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ ---- - -By using Rancher with vSphere, you can bring cloud operations on-premises. - -Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. - -A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. - -- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) -- [Provisioning Storage](#provisioning-storage) -- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider) - -# vSphere Enhancements in Rancher v2.3 - -The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: - -### Self-healing Node Pools - -_Available as of v2.3.0_ - -One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. - -> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -### Dynamically Populated Options for Instances and Scheduling - -_Available as of v2.3.3_ - -Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. - -For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) - -### More Supported Operating Systems - -In Rancher v2.3.3+, you can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) - -In Rancher before v2.3.3, the vSphere node driver included in Rancher only supported the provisioning of VMs with [RancherOS]({{}}/os/v1.x/en/) as the guest operating system. - -### Video Walkthrough of v2.3.3 Node Template Features - -In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. - -{{< youtube id="dPIwg6x1AlU">}} - -# Creating a vSphere Cluster - -In [this section,](./provisioning-vsphere-clusters) you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. 
- -# Provisioning Storage - -For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -# Enabling the vSphere Cloud Provider - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. - -For details, refer to the section on [enabling the vSphere cloud provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md deleted file mode 100644 index 90b0aef33..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Creating Credentials in the vSphere Console -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials ---- - -This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. - -The following table lists the permissions required for the vSphere user account: - -| Privilege Group | Operations | -|:----------------------|:-----------------------------------------------------------------------| -| Datastore | AllocateSpace
Browse
FileManagement (Low level file operations)
UpdateVirtualMachineFiles
UpdateVirtualMachineMetadata | -| Network | Assign | -| Resource | AssignVMToPool | -| Virtual Machine | Config (All)
GuestOperations (All)
Interact (All)
Inventory (All)
Provisioning (All) | - -The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: - -1. From the **vSphere** console, go to the **Administration** page. - -2. Go to the **Roles** tab. - -3. Create a new role. Give it a name and select the privileges listed in the permissions table above. - - {{< img "/img/rancher/rancherroles1.png" "image" >}} - -4. Go to the **Users and Groups** tab. - -5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. - - {{< img "/img/rancher/rancheruser.png" "image" >}} - -6. Go to the **Global Permissions** tab. - -7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - - {{< img "/img/rancher/globalpermissionuser.png" "image" >}} - - {{< img "/img/rancher/globalpermissionrole.png" "image" >}} - -**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md deleted file mode 100644 index bddd5ddd3..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Provisioning Kubernetes Clusters in vSphere -weight: 1 ---- - -In this section, you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. - -First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. - -Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) - -For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) - -- [Preparation in vSphere](#preparation-in-vsphere) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) - -# Preparation in vSphere - -This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. - -The node templates are documented and tested with the vSphere Web Services API version 6.5. - -### Create Credentials in vSphere - -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. - -Refer to this [how-to guide]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. 
These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. - -### Network Permissions - -Ensure that the hosts running the Rancher server are able to establish the following network connections: - -- To the vSphere API on the vCenter server (usually port 443/TCP). -- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher before v2.3.3 or when using the ISO creation method in later versions*). -- To ports 22/TCP and 2376/TCP on the created VMs. - -See [Node Networking Requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. - -### Valid ESXi License for vSphere API Access - -The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. - -### VM-VM Affinity Rules for Clusters with DRS - -If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. - -# Creating a vSphere Cluster - -The way a vSphere cluster is created in Rancher depends on the Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **vSphere**. -1. Enter your vSphere credentials. For help, refer to **Account Access** in the [configuration reference for your Rancher version.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/) -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for vSphere. For help filling out the form, refer to the vSphere node template configuration reference. 
Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: - - [v2.3.3]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3) - - [v2.3.0]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0) - - [v2.2.0]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in vSphere. - -1. Navigate to **Clusters** in the **Global** view. -1. Click **Add Cluster** and select the **vSphere** infrastructure provider. -1. Enter a **Cluster Name.** -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -Use Rancher to create a Kubernetes cluster in vSphere. - -For Rancher versions before v2.0.4, when you create the cluster, you will also need to follow the steps in [this section](http://localhost:9001/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vpshere-node-template-config/prior-to-2.0.4/#disk-uuids) to enable disk UUIDs. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **vSphere**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -Use Rancher to create a Kubernetes cluster in vSphere. - -For Rancher versions before v2.0.4, when you create the cluster, you will also need to follow the steps in [this section]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/#disk-uuids) to enable disk UUIDs. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **vSphere**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
-- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md deleted file mode 100644 index d660f823f..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: VSphere Node Template Configuration -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference - - /rancher/v2.x/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids ---- - -The vSphere node templates in Rancher were updated in the following Rancher versions. Refer to the newest configuration reference that is less than or equal to your Rancher version: - -- [v2.3.3](./v2.3.3) -- [v2.3.0](./v2.3.0) -- [v2.2.0](./v2.2.0) -- [v2.0.4](./v2.0.4) - -For Rancher versions before v2.0.4, refer to [this version.](./prior-to-2.0.4) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md deleted file mode 100644 index 9801050ad..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher before v2.0.4 -shortTitle: Before v2.0.4 -weight: 5 ---- - -- [Account access](#account-access) -- [Scheduling](#scheduling) -- [Instance options](#instance-options) -- [Disk UUIDs](#disk-uuids) -- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access -In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. 
If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Disk UUIDs - -In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. Follow these instructions to enable UUIDs for the nodes in your vSphere cluster. - -To enable disk UUIDs for all VMs created for a cluster, - -1. Navigate to the **Node Templates** in the Rancher UI while logged in as an administrator. -2. Add or edit an existing vSphere node template. -3. Under **Instance Options** click on **Add Parameter**. -4. Enter `disk.enableUUID` as key with a value of **TRUE**. - - {{< img "/img/rke/vsphere-nodedriver-enable-uuid.png" "vsphere-nodedriver-enable-uuid" >}} - -5. Click **Create** or **Save**. - -**Result:** The disk UUID is enabled in the vSphere node template. - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. 
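As a point of reference, the snippet below is a minimal sketch of what such a cloud-config might contain; the hostname and SSH key are illustrative placeholders only, and the full set of directives that RancherOS honors is described in the RancherOS documentation linked below.

```yaml
#cloud-config
# Illustrative values only -- replace the hostname and public key with your own.
hostname: rancher-node-1
ssh_authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2E... user@example.com
```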
- -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md deleted file mode 100644 index f53ea2087..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.0.4 -shortTitle: v2.0.4 -weight: 4 ---- -- [Account access](#account-access) -- [Scheduling](#scheduling) -- [Instance options](#instance-options) -- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access -In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. 
| -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md deleted file mode 100644 index 60410e3a6..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.2.0 -shortTitle: v2.2.0 -weight: 3 ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------|----------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. 
| -| Username and password | Enter your vSphere login username and password. | - -# Scheduling -Choose what hypervisor the virtual machine will be scheduled to. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. 
Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md deleted file mode 100644 index 337c62103..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.3.0 -shortTitle: v2.3.0 -weight: 2 ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------------|-----------------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. | - -# Scheduling -Choose what hypervisor the virtual machine will be scheduled to. - -In the **Scheduling** section, enter: - -- The name/path of the **Data Center** to create the VMs in -- The name of the **VM Network** to attach to -- The name/path of the **Datastore** to store the disks in - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md deleted file mode 100644 index 5021bdf67..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.3.3 -shortTitle: v2.3.3 -weight: 1 ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Networks](#networks) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [cloud-init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------------|--------------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. 
| - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. - -| Field | Required | Explanation | -|---------|---------------|-----------| -| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. | -| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | -| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. | -| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -| Parameter | Required | Description | -|:----------------|:--------:|:-----------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | -| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | -| Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - - -### About VM Creation Methods - -In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). 
- -The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). - -Choose the way that the VM will be created: - -- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. -- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates.** -- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. -- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). Note that this URL must be accessible from the nodes running your Rancher server installation. - -# Networks - -The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. - -# Node Tags and Custom Attributes - -Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -For tags, all your vSphere tags will show up as options to select from in your node template. - -In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# cloud-init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. - -Note that cloud-init is not supported when using the ISO creation method. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md deleted file mode 100644 index a03651da2..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md +++ /dev/null @@ -1,408 +0,0 @@ ---- -title: RKE Cluster Configuration Reference -weight: 2250 ---- - -When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) as the Kubernetes distribution. - -This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. - -You can configure the Kubernetes options one of two ways: - -- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. 
-- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -In Rancher v2.0.0-v2.2.x, the RKE cluster config file in Rancher is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) - -This section is a cluster configuration reference, covering the following topics: - -- [Rancher UI Options](#rancher-ui-options) - - [Kubernetes version](#kubernetes-version) - - [Network provider](#network-provider) - - [Kubernetes cloud providers](#kubernetes-cloud-providers) - - [Private registries](#private-registries) - - [Authorized cluster endpoint](#authorized-cluster-endpoint) - - [Node pools](#node-pools) -- [Advanced Options](#advanced-options) - - [NGINX Ingress](#nginx-ingress) - - [Node port range](#node-port-range) - - [Metrics server monitoring](#metrics-server-monitoring) - - [Pod security policy support](#pod-security-policy-support) - - [Docker version on nodes](#docker-version-on-nodes) - - [Docker root directory](#docker-root-directory) - - [Recurring etcd snapshots](#recurring-etcd-snapshots) - - [Agent Environment Variables](#agent-environment-variables) -- [Cluster config file](#cluster-config-file) - - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0) - - [Config file structure in Rancher v2.0.0-v2.2.x](#config-file-structure-in-rancher-v2-0-0-v2-2-x) - - [Default DNS provider](#default-dns-provider) -- [Rancher specific parameters](#rancher-specific-parameters) - -# Rancher UI Options - -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. - -### Kubernetes Version - -The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). - -### Network Provider - -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.x/en/faq/networking/cni-providers/). - ->**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. 
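If you manage the cluster with the config file described later on this page rather than through the UI, the provider is selected under the `network` directive. The snippet below is a minimal sketch only, shown nested under `rancher_kubernetes_engine_config` as used in Rancher v2.3.0+; `canal` is simply an example value.

```yaml
rancher_kubernetes_engine_config:
  network:
    plugin: canal   # for example: canal, flannel, calico, or weave
```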
-
-Out of the box, Rancher is compatible with the following network providers:
-
-- [Canal](https://github.com/projectcalico/canal)
-- [Flannel](https://github.com/coreos/flannel#flannel)
-- [Calico](https://docs.projectcalico.org/v3.11/introduction/)
-- [Weave](https://github.com/weaveworks/weave) (Available as of v2.2.0)
-
-**Notes on Canal:**
-
-In v2.0.0 - v2.0.4 and v2.0.6, Canal with network isolation was the default option for these clusters. With network isolation automatically enabled, it prevented any pod communication between [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/).
-
-As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/).
-
->**Attention Rancher v2.0.0 - v2.0.6 Users**
->
->- In previous Rancher releases, Canal isolated project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects.
->- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option.
-
-**Notes on Flannel:**
-
-In v2.0.5, Flannel was the default option, which did not provide any network isolation between projects.
-
-**Notes on Weave:**
-
-When Weave is selected as the network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options).
-
-### Kubernetes Cloud Providers
-
-You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, you typically must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider.
-
->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please refer to the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) for how to configure the cloud provider.
-
-If you want to see all the configuration options for a cluster, click **Show advanced options** on the bottom right. The advanced options are described below:
-
-### Private registries
-
-_Available as of v2.2.0_
-
-The cluster-level private registry configuration is only used for provisioning clusters.
-
-There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.x/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials.
The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. - -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - -- **System images** are components needed to maintain the Kubernetes cluster. -- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. - -See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. - -### Authorized Cluster Endpoint - -_Available as of v2.2.0_ - -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. - -> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE]({{}}/rancher/v2.x/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are imported into Rancher; it is available only on Rancher-launched Kubernetes clusters. - -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. - -For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -### Node Pools - -For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools) - -# Advanced Options - -The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** - -### NGINX Ingress - -Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -### Node Port Range - -Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. - -### Metrics Server Monitoring - -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). - -### Pod Security Policy Support - -Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. 
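To make the mapping between the UI and the config file concrete, the sketch below shows roughly how a few of these advanced options surface as keys in the cluster config file (using the v2.3.0+ structure described later on this page). The values shown are placeholders; the complete example config files further down are the authoritative reference.

```yaml
# Illustrative fragment only -- see the full example config files later on this page.
rancher_kubernetes_engine_config:
  ingress:
    provider: nginx                          # NGINX Ingress (set "none" to disable it)
  monitoring:
    provider: metrics-server                 # Metrics Server monitoring
  services:
    kube_api:
      pod_security_policy: true              # Pod Security Policy support
      service_node_port_range: 30000-32767   # Node port range for NodePort services
```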
- -### Docker Version on Nodes - -Option to require [a supported Docker version]({{}}/rancher/v2.x/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. - -### Docker Root Directory - -If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. - -### Recurring etcd Snapshots - -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). - -### Agent Environment Variables - -_Available as of v2.5.6_ - -Option to set environment variables for [rancher agents]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. - -# Cluster Config File - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from a file**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. - -### Config File Structure in Rancher v2.3.0+ - -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. - -{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.3.0+" %}} - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` -{{% /accordion %}} - -### Config File Structure in Rancher v2.0.0-v2.2.x - -An example cluster config file is included below. - -{{% accordion id="before-v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.0.0-v2.2.x" %}} -```yaml -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.15.3-rancher3-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 -ssh_agent_auth: false -``` -{{% /accordion %}} - -### Default DNS provider - -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. 
- -| Rancher version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v2.2.5 and higher | v1.13.x and lower | kube-dns | -| v2.2.4 and lower | any | kube-dns | - -# Rancher specific parameters - -_Available as of v2.2.0_ - -Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): - -### docker_root_dir - -See [Docker Root Directory](#docker-root-directory). - -### enable_cluster_monitoring - -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/). - -### enable_network_policy - -Option to enable or disable Project Network Isolation. - -### local_cluster_auth_endpoint - -See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). - -Example: - -```yaml -local_cluster_auth_endpoint: - enabled: true - fqdn: "FQDN" - ca_certs: "BASE64_CACERT" -``` - -### Custom Network Plug-in - -_Available as of v2.2.4_ - -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. - -There are two ways that you can specify an add-on: - -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) - -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md deleted file mode 100644 index 009fca03a..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Assigning Pod Security Policies -weight: 2260 ---- - -_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). - -## Adding a Default Pod Security Policy - -When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. - ->**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). ->**Note:** ->For security purposes, we recommend assigning a PSP as you create your clusters. - -To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. - -When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. 
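For orientation, a restrictive policy of the kind you might create in Rancher and then assign as the cluster default could look like the minimal sketch below. The policy name and the specific restrictions are illustrative assumptions, not a policy that Rancher ships; define whatever constraints your workloads actually need before assigning a default PSP.

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example          # hypothetical name
spec:
  privileged: false                 # disallow privileged containers
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  hostNetwork: false
  hostIPC: false
  hostPID: false
  volumes:                          # allow only common non-host volume types
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
  runAsUser:
    rule: MustRunAsNonRoot          # containers may not run as root
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
```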
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md
deleted file mode 100644
index 88eae431f..000000000
--- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: Rancher Agents
-weight: 2400
----
-
-There are two different agent resources deployed on Rancher-managed clusters:
-
-- [cattle-cluster-agent](#cattle-cluster-agent)
-- [cattle-node-agent](#cattle-node-agent)
-
-For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) page.
-
-### cattle-cluster-agent
-
-The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource.
-
-### cattle-node-agent
-
-The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading the Kubernetes version and creating or restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as a fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when the `cattle-cluster-agent` is unavailable.
-
-> **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher.
-
-### Scheduling rules
-
-_Applies to v2.5.4 and higher_
-
-Starting with Rancher v2.5.4, the tolerations for the `cattle-cluster-agent` changed from `operator:Exists` (allowing all taints) to a fixed set of tolerations (listed below, used if no controlplane nodes are visible in the cluster) or dynamically added tolerations based on taints applied to the controlplane nodes. This change was made to allow [Taint based Evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions) to work properly for `cattle-cluster-agent`. The default tolerations are described below. If controlplane nodes are present in the cluster, the tolerations will be replaced with tolerations matching the taints on the controlplane nodes.
-
-| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations |
-| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ |
-| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **Note:** These are the default tolerations, and will be replaced by tolerations matching taints applied to controlplane nodes.

`effect:NoSchedule`
`key:node-role.kubernetes.io/controlplane`
`value:true`

`effect:NoSchedule`
`key:node-role.kubernetes.io/control-plane`
`operator:Exists`

`effect:NoSchedule`
`key:node-role.kubernetes.io/master`
`operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. When there are no controlplane nodes visible in the cluster (this is usually the case when using [Clusters from Hosted Kubernetes Providers]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/)), you can add the label `cattle.io/cluster-agent=true` on a node to prefer scheduling the `cattle-cluster-agent` pod to that node. - -See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. - -The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 100 | `node-role.kubernetes.io/control-plane:In:"true"` | -| 100 | `node-role.kubernetes.io/master:In:"true"` | -| 1 | `cattle.io/cluster-agent:In:"true"` | - -_Applies to v2.3.0 up to v2.5.3_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. - -The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md deleted file mode 100644 index 324969aad..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Launching Kubernetes on Windows Clusters -weight: 2240 ---- - -_Available as of v2.3.0_ - -When provisioning a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. - -In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. - -Some other requirements for Windows clusters include: - -- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. -- Kubernetes 1.15+ is required. 
-- The Flannel network provider must be used. -- Windows nodes must have 50 GB of disk space. - -For the full list of requirements, see [this section.](#requirements-for-windows-clusters) - -For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). - -This guide covers the following topics: - - - -- [Requirements](#requirements-for-windows-clusters) -- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) -- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) - - -# Requirements for Windows Clusters - -The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.x/en/installation/requirements/). - -### OS and Docker Requirements - -In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): - -- Nodes with Windows Server Core version 1809 should use Basic Docker EE 18.09 or Basic Docker EE 19.03+. -- Nodes with Windows Server Core version 1903+ should use Basic Docker EE 19.03+. - -> **Notes:** -> -> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). -> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. - -### Kubernetes Version - -Kubernetes v1.15+ is required. -The [Kubernetes docs for Windows nodes](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#windows-os-version-support) provide a compatability matrix that can be used to assess which versions of Windows Server are supported based on your Kubernetes cluster version. - -> **More Information on Windows Server Versions:** -> -> - [Release Information](https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info) -> - [Servicing Channels: LTSC and SAC](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19) - -### Node Requirements - -The hosts in the cluster need to have at least: - -- 2 core CPUs -- 5 GB memory -- 50 GB disk space - -Rancher will not provision the node if the node does not meet these requirements. - -### Networking Requirements - -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.x/en/installation/) before proceeding with this guide. - -Rancher only supports Windows using Flannel as the network provider. - -There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. 
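If you later manage the cluster through its config file rather than the UI, the backend choice shows up under the Flannel network options. A minimal sketch, assuming the standard RKE option keys (the UI normally sets this for you when Windows support is enabled):

```yaml
rancher_kubernetes_engine_config:
  network:
    plugin: flannel
    options:
      flannel_backend_type: vxlan   # or "host-gw" for Host Gateway (L2bridge)
```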
- -For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. - -If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) - -> Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name. - -### Architecture Requirements - -The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. - -The `worker` nodes, which is where your workloads will be deployed on, will typically be Windows nodes, but there must be at least one `worker` node that is run on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers. - -We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: - - - -| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | -| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | Control plane, etcd, worker | Manage the Kubernetes cluster | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | Worker | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | -| Node 3 | Windows (Windows Server core version 1809 or above) | Worker | Run your Windows containers | - -### Container Requirements - -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. - -### Cloud Provider Specific Requirements - -If you set a Kubernetes cloud provider in your cluster, some additional steps are required. 
You might want to set a cloud provider if you want to want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. - -If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: - -- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/gce) -- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. - -# Tutorial: How to Create a Cluster with Windows Support - -This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) - -When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. - -To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below. - - - -1. [Provision Hosts](#1-provision-hosts) -1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes) -1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) -1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files) - - -# 1. Provision Hosts - -To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts. - -Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -You will provision three nodes: - -- One Linux node, which manages the Kubernetes control plane and stores your `etcd` -- A second Linux node, which will be another worker node -- The Windows node, which will run your Windows containers as a worker node - -| Node | Operating System | -| ------ | ------------------------------------------------------------ | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | -| Node 3 | Windows (Windows Server core version 1809 or above required) | - -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) - -# 2. Create the Cluster on Existing Nodes - -The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) with some Windows-specific requirements. - -1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. -1. Click **From existing nodes (Custom)**. -1. Enter a name for your cluster in the **Cluster Name** text box. -1. In the **Kubernetes Version** dropdown menu, select v1.15 or above. -1. In the **Network Provider** field, select **Flannel.** -1. 
In the **Windows Support** section, click **Enable.** -1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. -1. Click **Next**. - -> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -# 3. Add Nodes to the Cluster - -This section describes how to register your Linux and Worker nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node. - -### Add Linux Master Node - -In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. - -The first node in your cluster should be a Linux host has both the **Control Plane** and **etcd** roles. At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. - -1. In the **Node Operating System** section, click **Linux**. -1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.x/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -1. Copy the command displayed on the screen to your clipboard. -1. SSH into your Linux host and run the command that you copied to your clipboard. -1. When you are finished provisioning your Linux node(s), select **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -It may take a few minutes for the node to be registered in your cluster. - -### Add Linux Worker Node - -In this section, we run a command to register the Linux worker node to the cluster. - -After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Linux**. -1. 
In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. -1. Copy the command displayed on screen to your clipboard. -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. -1. From **Rancher**, click **Save**. - -**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. - -> **Note:** Taints on Linux Worker Nodes -> -> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the Windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. - -> | Taint Key | Taint Value | Taint Effect | -> | -------------- | ----------- | ------------ | -> | `cattle.io/os` | `linux` | `NoSchedule` | - -### Add a Windows Worker Node - -In this section, we run a command to register the Windows worker node to the cluster. - -You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. -1. Copy the command displayed on screen to your clipboard. -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. -1. From Rancher, click **Save**. -1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. - -**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
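As a quick sanity check after downloading the cluster's kubeconfig file from the Rancher UI, you can confirm that both access paths work and that all three nodes registered. The file name and context names below are placeholders; run `kubectl config get-contexts` to see the names generated for your cluster.

```bash
# Assumes you downloaded the kubeconfig for the cluster from the Rancher UI.
export KUBECONFIG=$PWD/my-windows-cluster.yaml    # placeholder file name

kubectl config get-contexts           # lists the Rancher-proxied context and, if enabled,
                                      # the authorized cluster endpoint context(s)
kubectl config use-context my-windows-cluster     # placeholder context name
kubectl get nodes -o wide             # should show the two Linux nodes and the Windows worker
```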
- -# Configuration for Storage Classes in Azure - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as an available StorageClass for your cluster. For details, refer to [this section.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md deleted file mode 100644 index b9b99ffc3..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Configuration for Storage Classes in Azure -weight: 3 ---- - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. - -In order to have the Azure platform create the required storage resources, follow these steps: - -1. [Configure the Azure cloud provider.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure) -1. Configure `kubectl` to connect to your cluster. -1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: - - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: system:azure-cloud-provider - rules: - - apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: system:azure-cloud-provider - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider - subjects: - - kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system - -1. Create these in your cluster using one of the follow command. - - ``` - # kubectl create -f - ``` diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md deleted file mode 100644 index 9e3a8a692..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: v2.1.x and v2.2.x Windows Documentation (Experimental) -weight: 9100 ---- - -_Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ - -This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). - -When you create a [custom cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. - -You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. - ->**Important:** In versions of Rancher before v2.3, support for Windows nodes is experimental. Therefore, it is not recommended to use Windows nodes for production environments if you are using Rancher before v2.3. 
- -This guide walks you through create of a custom cluster that includes three nodes: - -- A Linux node, which serves as a Kubernetes control plane node -- Another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster -- A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers - -For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). - -## OS and Container Requirements - -- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. -- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. - -## Objectives for Creating Cluster with Windows Support - -When setting up a custom cluster with support for Windows nodes and containers, complete the series of tasks below. - - - -- [1. Provision Hosts](#1-provision-hosts) -- [2. Cloud-host VM Networking Configuration](#2-cloud-hosted-vm-networking-configuration) -- [3. Create the Custom Cluster](#3-create-the-custom-cluster) -- [4. Add Linux Host for Ingress Support](#4-add-linux-host-for-ingress-support) -- [5. Adding Windows Workers](#5-adding-windows-workers) -- [6. Cloud-host VM Routes Configuration](#6-cloud-hosted-vm-routes-configuration) - - - -## 1. Provision Hosts - -To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements]({{}}/rancher/v2.x/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -The table below lists the [Kubernetes roles]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. - -Node | Operating System | Future Cluster Role(s) ---------|------------------|------ -Node 1 | Linux (Ubuntu Server 16.04 recommended) | Control plane, etcd, worker -Node 2 | Linux (Ubuntu Server 16.04 recommended) | Worker (This node is used for Ingress support) -Node 3 | Windows (Windows Server core version 1809 or above) | Worker - -### Requirements - -- You can view node requirements for Linux and Windows nodes in the [installation section]({{}}/rancher/v2.x/en/installation/requirements/). -- All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. -- To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. -- Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. - - -## 2. Cloud-hosted VM Networking Configuration - ->**Note:** This step only applies to nodes hosted on cloud-hosted virtual machines. 
If you're using virtualization clusters or bare-metal servers, skip ahead to [Create the Custom Cluster](#3-create-the-custom-cluster). - -If you're hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. - -Service | Directions to disable private IP address checks ---------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) - -## 3. Create the Custom Cluster - -To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), starting from 2. Create the Custom Cluster. While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. - - -### Enable the Windows Support Option - -While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. - -After you select this option, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 6]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). - -### Networking Option - -When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. - -If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. - -### Node Configuration - -The first node in your cluster should be a Linux host that fills the Control Plane role. This role must be fulfilled before you can add Windows hosts to your cluster. At minimum, the node must have this role enabled, but we recommend enabling all three. The following table lists our recommended settings (we'll provide the recommended settings for nodes 2 and 3 later). - -Option | Setting --------|-------- -Node Operating System | Linux -Node Roles | etcd
Control Plane
Worker - -When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 8]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). - - - -## 4. Add Linux Host for Ingress Support - -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Add another Linux host, which will be used to support Ingress for your cluster. - -1. Using the content menu, open the custom cluster your created in [2. Create the Custom Cluster](#3-create-the-custom-cluster). - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Linux**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. - -1. From **Rancher**, click **Save**. - -**Result:** The worker role is installed on your Linux host, and the node registers with Rancher. - -## 5. Adding Windows Workers - -You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Windows**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. - -1. From Rancher, click **Save**. - -1. **Optional:** Repeat these instruction if you want to add more Windows nodes to your cluster. - -**Result:** The worker role is installed on your Windows host, and the node registers with Rancher. - -## 6. Cloud-hosted VM Routes Configuration - -In Windows clusters, containers communicate with each other using the `host-gw` mode of Flannel. In `host-gw` mode, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. - -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. - -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). 
-Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). - - -` ` diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md deleted file mode 100644 index ee075c394..000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Networking Requirements for Host Gateway (L2bridge) -weight: 1000 ---- - -This section describes how to configure custom Windows clusters that are using *Host Gateway (L2bridge)* mode. - -### Disabling Private IP Address Checks - -If you are using *Host Gateway (L2bridge)* mode and hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. - -Service | Directions to disable private IP address checks ---------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) (By default, a VM cannot forward a packet originated by another VM) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) - -### Cloud-hosted VM Routes Configuration - -If you are using the [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) backend of Flannel, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. - -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. - -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). -Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). 
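As a concrete illustration of the GCE step, the snippet below sketches how a static route for one worker node's pod subnet might be created with `gcloud`, using the podCIDR reported by the `kubectl` command above. The route name, instance name, zone, network, and CIDR are all placeholder assumptions; substitute values from your own environment and repeat for each worker node.

```bash
# Assumes the kubectl command above reported that node "win-worker-1"
# (in network "default", zone "us-central1-a") owns podCIDR 10.42.2.0/24.
gcloud compute routes create rancher-win-worker-1-pods \
  --network=default \
  --destination-range=10.42.2.0/24 \
  --next-hop-instance=win-worker-1 \
  --next-hop-instance-zone=us-central1-a
```

On Azure, the equivalent is one user-defined route per node in a route table associated with the subnet, as described in the linked Microsoft documentation.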
diff --git a/content/rancher/v2.x/en/contributing/_index.md b/content/rancher/v2.x/en/contributing/_index.md deleted file mode 100644 index ab7003754..000000000 --- a/content/rancher/v2.x/en/contributing/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Contributing to Rancher -weight: 27 -aliases: - - /rancher/v2.x/en/faq/contributing/ ---- - -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. - -For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: - -- How to set up the Rancher development environment and run tests -- The typical flow of an issue through the development lifecycle -- Coding guidelines and development best practices -- Debugging and troubleshooting -- Developing the Rancher API - -On the Rancher Users Slack, the channel for developers is **#developer**. - -# Repositories - -All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. - -Repository | URL | Description ------------|-----|------------- -Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. -Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. -API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. -(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. -machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. -kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. -RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. -CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. -(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. -Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. -loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. - -To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. - -![Rancher diagram]({{}}/img/rancher/ranchercomponentsdiagram.svg)
-Rancher components used for provisioning/managing Kubernetes clusters. - -# Building - -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling. - -The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. - -# Bugs, Issues or Questions - -If you find any bugs or are having any trouble, please search the [reported issues](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. - -If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about a use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). - -### Checklist for Filing Issues - -Please follow this checklist when filing an issue, which will help us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. - ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. - -- **Resources:** Provide as much detail as possible on the resources used. As the source of the issue can be many things, including as much detail as possible helps to determine the root cause. See some examples below: - - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce - - **Operating System:** What operating system are you using? Providing specifics helps here, like the output of `cat /etc/os-release` for the exact OS release and `uname -r` for the exact kernel used - - **Docker:** What Docker version are you using, and how did you install it? Most of the details of Docker can be found by supplying the output of `docker version` and `docker info` - - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer - - **Rancher:** What version of Rancher are you using? This can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host - - **Clusters:** What kind of cluster did you create, how did you create it, and what did you specify when you were creating it -- **Steps to reproduce the issue:** Provide as much detail as possible on how you got into the reported situation. This helps the person trying to reproduce the situation you are in. - - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. -- **Logs:** Provide data/logs from the used resources.
- - Rancher - - Docker install - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') - ``` - - Kubernetes install using `kubectl` - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - -l app=rancher \ - --timestamps=true - ``` - - Docker install using `docker` on each of the nodes in the RKE cluster - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') - ``` - - Kubernetes Install with RKE Add-On - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - --timestamps=true \ - -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') - ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` -- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -# Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. diff --git a/content/rancher/v2.x/en/deploy-across-clusters/_index.md b/content/rancher/v2.x/en/deploy-across-clusters/_index.md deleted file mode 100644 index 3f8c114c4..000000000 --- a/content/rancher/v2.x/en/deploy-across-clusters/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Deploying Applications across Clusters -weight: 13 ---- - - - -### Fleet - -_Available in v2.5_ - -Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. - -Fleet is GitOps at scale. For more information, refer to the [Fleet section.](./fleet) - -### Multi-cluster Apps - -In Rancher before v2.5, the multi-cluster apps feature was used to deploy applications across clusters. Refer to the documentation [here.](./multi-cluster-apps) \ No newline at end of file diff --git a/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md b/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md deleted file mode 100644 index a697b1a39..000000000 --- a/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Fleet - GitOps at Scale -shortTitle: Rancher v2.5 -weight: 1 ---- - -_Available as of Rancher v2.5_ - -Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. 
It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. - -Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. - -![Architecture]({{}}/img/rancher/fleet-architecture.png) - -Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to -deploy everything in the cluster. This give a high degree of control, consistency, and auditability. Fleet focuses not only on the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster. - -### Accessing Fleet in the Rancher UI - -Fleet comes preinstalled in Rancher v2.5. To access it, go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > Continuous Delivery.** On this page, you can edit Kubernetes resources and cluster groups managed by Fleet. - -### Windows Support - -Before Rancher v2.5.6, the `agent` did not have native Windows manifests on downstream clusters with Windows nodes. - -This would result in a failing `agent` pod for the cluster. -If you are upgrading from an older version of Rancher to v2.5.6+, you can deploy a working `agent` with the following workflow *in the downstream cluster*: - -1. Cordon all Windows nodes. -1. Apply the below toleration to the `agent` workload. -1. Uncordon all Windows nodes. -1. Delete all `agent` pods. New pods should be created with the new toleration. -1. Once the `agent` pods are running, and auto-update is enabled for Fleet, they should be updated to a Windows-compatible `agent` version. - -```yaml -tolerations: -- effect: NoSchedule - key: cattle.io/os - operator: Equal - value: linux -``` - -### GitHub Repository - -The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) - -### Documentation - -The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/) diff --git a/content/rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/_index.md b/content/rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/_index.md deleted file mode 100644 index 592798311..000000000 --- a/content/rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/_index.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Multi-cluster Apps -shortTitle: Rancher v2.2-v2.4 -weight: 2 ---- - -_Available as of v2.2.0_ - -> As of Rancher v2.5, we recommend using [Fleet]({{}}/rancher/v2.x/en/deploy-across-clusters/fleet) for deploying apps across clusters. - -Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. 
With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. - -Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. - -After creating a multi-cluster application, you can program a [Global DNS entry]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/) to make it easier to access the application. - -- [Prerequisites](#prerequisites) -- [Launching a multi-cluster app](#launching-a-multi-cluster-app) -- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) - - [Targets](#targets) - - [Upgrades](#upgrades) - - [Roles](#roles) -- [Application configuration options](#application-configuration-options) - - [Using a questions.yml file](#using-a-questions-yml-file) - - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) - - [Members](#members) - - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) -- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) -- [Multi-cluster application management](#multi-cluster-application-management) -- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) - -# Prerequisites - -To create a multi-cluster app in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the clusters(s) that include the target project(s) - -# Launching a Multi-Cluster App - -1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. - -2. Find the application that you want to launch, and then click **View Details**. - -3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. - -4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. - -5. Select a **Template Version**. - -6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). - -7. Select the **Members** who can [interact with the multi-cluster application](#members). - -8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: - -# Multi-cluster App Configuration Options - -Rancher has divided the configuration option for the multi-cluster application into several sections. 
- -### Targets - -In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. - -### Upgrades - -In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. - -* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. - -* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. - -### Roles - -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.x/en/catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. - -For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. - -Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. - -- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. - -- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. - -When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. - -> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. 
If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. - -# Application Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/catalog/custom/), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -### Members - -By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. - -1. Find the user that you want to add by typing in the member's name in the **Member** search box. - -2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. - - - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. 
- - > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. - -### Overriding Application Configuration Options for Specific Projects - -The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. - -1. In the **Answer Overrides** section, click **Add Override**. - -2. For each override, you can select the following: - - - **Scope**: Select the target project(s) in which you want to override the answer for the configuration option. - - - **Question**: Select which question you want to override. - - - **Answer**: Enter the answer that you want to be used instead. - -# Upgrading Multi-Cluster App Roles and Projects - -- **Changing Roles on an existing Multi-Cluster app** -The creator and any users added with the access-type "owner" to a multi-cluster app can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. - -- **Adding/Removing target projects** -1. The creator and any users added with access-type "owner" to a multi-cluster app can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on the multi-cluster app in the new projects they want to add. The role checks are again relaxed for global admins, cluster-owners and project-owners. -2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have changed with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from the targets list.
Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the target(s) affected by the upgrade issue. - -# Deleting a Multi-Cluster Application - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. - - > **Note:** The applications in the target projects that are created for a multi-cluster application cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/content/rancher/v2.x/en/faq/_index.md b/content/rancher/v2.x/en/faq/_index.md deleted file mode 100644 index de15dd4cb..000000000 --- a/content/rancher/v2.x/en/faq/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: FAQ -weight: 25 -aliases: - - /rancher/v2.x/en/about/ ---- - -This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. - -See [Technical FAQ]({{}}/rancher/v2.x/en/faq/technical/) for frequently asked technical questions. - -
- -**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** - -When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. - -
- -**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** - -Yes. - -
- -**Does Rancher support Windows?** - -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) - -
- -**Does Rancher support Istio?** - -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) - -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) - -
- -**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** - -Secrets management is on our roadmap but we haven't assigned it to a specific release yet. - -
- -**Does Rancher v2.x support RKT containers as well?** - -At this time, we only support Docker. - -
- -**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. - -
- -**Are you planning on supporting Traefik for existing setups?** - -We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. - -
- -**Can I import OpenShift Kubernetes clusters into v2.x?** - -Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -
- -**Are you going to integrate Longhorn?** - -Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/content/rancher/v2.x/en/faq/deprecated-features-25x/_index.md b/content/rancher/v2.x/en/faq/deprecated-features-25x/_index.md deleted file mode 100644 index a30b42122..000000000 --- a/content/rancher/v2.x/en/faq/deprecated-features-25x/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Deprecated Features in Rancher v2.5 -weight: 100 ---- - -### What is Rancher's Deprecation policy? - -Starting in Rancher 2.5 we have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). - -### Where can I find out which features have been deprecated in Rancher 2.5? - -Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases/tag/v2.5.0) for Rancher found on GitHub. - -### What can I expect when a feature is marked for deprecation? - -In the release where functionality is marked as Deprecated it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. \ No newline at end of file diff --git a/content/rancher/v2.x/en/faq/kubectl/_index.md b/content/rancher/v2.x/en/faq/kubectl/_index.md deleted file mode 100644 index ffd8eee67..000000000 --- a/content/rancher/v2.x/en/faq/kubectl/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Installing and Configuring kubectl -weight: 100 ---- - -`kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. - -### Installation - -See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. - -### Configuration - -When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. - -You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_cluster.yml`. - -``` -export KUBECONFIG=$(pwd)/kube_config_cluster.yml -``` - -Test your connectivity with `kubectl` and see if you can get the list of nodes back. 
- -``` -kubectl get nodes - NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 -``` diff --git a/content/rancher/v2.x/en/faq/networking/_index.md b/content/rancher/v2.x/en/faq/networking/_index.md deleted file mode 100644 index 863ad9716..000000000 --- a/content/rancher/v2.x/en/faq/networking/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Networking -weight: 8005 ---- - -Networking FAQ's - -- [CNI Providers]({{}}/rancher/v2.x/en/faq/networking/cni-providers/) - diff --git a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md deleted file mode 100644 index 3618bbb30..000000000 --- a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Container Network Interface (CNI) Providers -description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you -weight: 2300 ---- - -## What is CNI? - -CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. - -Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. - -![CNI Logo]({{}}/img/rancher/cni-logo.png) - -For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). - -### What Network Models are Used in CNI? - -CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). - -#### What is an Encapsulated Network? - -This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. - -In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. - -This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. - -CNI network providers using this network model include Flannel, Canal, and Weave. - -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) - -#### What is an Unencapsulated Network? 
- -This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). - -In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. - -This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. - -CNI network providers using this network model include Calico and Romana. - -![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) - -### What CNI Providers are Provided by Rancher? - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Canal - -![Canal Logo]({{}}/img/rancher/canal-logo.png) - -Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. - -In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) - -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} - -For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) - -#### Flannel - -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) - -Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). - -Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. 
- -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) - -For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). - -#### Calico - -![Calico Logo]({{}}/img/rancher/calico-logo.png) - -Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. - -Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. - -Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) - -For more information, see the following pages: - -- [Project Calico Official Site](https://www.projectcalico.org/) -- [Project Calico GitHub Page](https://github.com/projectcalico/calico) - - -#### Weave - -![Weave Logo]({{}}/img/rancher/weave-logo.png) - -_Available as of v2.2.0_ - -Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. - -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -For more information, see the following pages: - -- [Weave Net Official Site](https://www.weave.works/) - -### CNI Features by Provider - -The following table summarizes the different features available for each CNI network provider provided by Rancher. - -| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | -| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | -| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | -| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | -| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | - -- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) - -- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. - -- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. - -- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. - -- External Datastore: CNI network providers with this feature need an external datastore for its data. 
- -- Encryption: This feature allows cyphered and secure network control and data planes. - -- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. - -#### CNI Community Popularity - -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. - -| Provider | Project | Stars | Forks | Contributors | -| ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | -| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | -| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | -| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | - -
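As a quick sanity check after provisioning, you can usually see which CNI provider a cluster is running by inspecting the `kube-system` namespace. This is only an illustrative sketch; the exact DaemonSet and pod names vary by provider and by the Rancher/RKE version in use.

```bash
# List the networking DaemonSets deployed in the cluster
kubectl -n kube-system get daemonsets

# Or grep the running pods for the common provider names
kubectl -n kube-system get pods -o wide | grep -Ei 'canal|calico|flannel|weave'
```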
-### Which CNI Provider Should I Use? - -It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. - -As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. - -### How can I configure a CNI network provider? - -Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.x/en/faq/removing-rancher/_index.md b/content/rancher/v2.x/en/faq/removing-rancher/_index.md deleted file mode 100644 index b3ebb1063..000000000 --- a/content/rancher/v2.x/en/faq/removing-rancher/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Rancher is No Longer Needed -weight: 8010 -aliases: - - /rancher/v2.x/en/installation/removing-rancher/cleaning-cluster-nodes/ - - /rancher/v2.x/en/installation/removing-rancher/ - - /rancher/v2.x/en/admin-settings/removing-rancher/ - - /rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/ ---- - -This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. - -- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) -- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) -- [What if I don't want my imported cluster managed by Rancher?](#what-if-i-don-t-want-my-imported-cluster-managed-by-rancher) -- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) - -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? - -If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. - -### If the Rancher server is deleted, how do I access my downstream clusters? - -The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: - -- **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. -- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. 
-- **RKE clusters:** To access an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.x/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. - -### What if I don't want Rancher anymore? - -If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) remove Rancher by using the [System Tools]({{}}/rancher/v2.x/en/system-tools/) with the `remove` subcommand. - -If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. - -Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) - -### What if I don't want my imported cluster managed by Rancher? - -If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. - -To detach the cluster, - -1. From the **Global** view in Rancher, go to the **Clusters** tab. -2. Go to the imported cluster that should be detached from Rancher and click **⋮ > Delete.** -3. Click **Delete.** - -**Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. - -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? - -At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. - -The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) - -For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/content/rancher/v2.x/en/faq/security/_index.md b/content/rancher/v2.x/en/faq/security/_index.md deleted file mode 100644 index f9d6ec864..000000000 --- a/content/rancher/v2.x/en/faq/security/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Security -weight: 8007 - ---- - -**Is there a Hardening Guide?** - -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.x/en/security/) section. - -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** - -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.x/en/security/) section. diff --git a/content/rancher/v2.x/en/faq/technical/_index.md b/content/rancher/v2.x/en/faq/technical/_index.md deleted file mode 100644 index 62c3d7589..000000000 --- a/content/rancher/v2.x/en/faq/technical/_index.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: Technical -weight: 8006 ---- - -### How can I reset the administrator password? - -Docker Install: -``` -$ docker exec -ti reset-password -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password -New password for default administrator (user-xxxxx): - -``` - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password -New password for default administrator (user-xxxxx): - -``` - -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: -``` -$ docker exec -ti ensure-default-admin -New default administrator (user-xxxxx) -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin -New password for default administrator (user-xxxxx): - -``` - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin -New password for default admin user (user-xxxxx): - -``` - -### How can I enable debug logging? - -See [Troubleshooting: Logging]({{}}/rancher/v2.x/en/troubleshooting/logging/) - -### My ClusterIP does not respond to ping - -ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. - -### Where can I manage Node Templates? 
- -Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. - -### Why is my Layer-4 Load Balancer in `Pending` state? - -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - -### Where is the state of Rancher stored? - -- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. -- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. - -### How are the supported Docker versions determined? - -We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. - -### How can I access nodes created by Rancher? - -SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. - -![Download Keys]({{}}/img/rancher/downloadsshkeys.png) - -Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) - -``` -$ ssh -i id_rsa user@ip_of_node -``` - -### How can I automate task X in Rancher? - -The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: - -* Visit `https://your_rancher_ip/v3` and browse the API options. -* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) - -### The IP address of a node changed, how can I recover? - -A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. - -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) to clean the node. - -When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. - -### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? - -You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) option in Cluster Options. 
For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). - -### How do I check if my certificate chain is valid? - -Use the `openssl verify` command to validate your certificate chain: - ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. If you already have this certificate, you can use it in the verification of the certificate like shown below: - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). - -``` ------BEGIN CERTIFICATE----- -%YOUR_CERTIFICATE% ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -%YOUR_INTERMEDIATE_CERTIFICATE% ------END CERTIFICATE----- -``` - -If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: - -``` -openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem -subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com -issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA -``` - -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? - -Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. - -Check `Common Name`: - -``` -openssl x509 -noout -subject -in cert.pem -subject= /CN=rancher.my.org -``` - -Check `Subject Alternative Names`: - -``` -openssl x509 -noout -in cert.pem -text | grep DNS - DNS:rancher.my.org -``` - -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? 
- -This is due to a combination of the following default Kubernetes settings: - -* kubelet - * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) -* kube-controller-manager - * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) - * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) - * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) - -See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. - -In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. - -* kube-apiserver (Kubernetes v1.13 and up) - * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. - * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. - -### Can I use keyboard shortcuts in the UI? - -Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/content/rancher/v2.x/en/faq/telemetry/_index.md b/content/rancher/v2.x/en/faq/telemetry/_index.md deleted file mode 100644 index 6ab582667..000000000 --- a/content/rancher/v2.x/en/faq/telemetry/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Telemetry -weight: 8008 ---- - -### What is Telemetry? - -Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. - -### What information is collected? - -No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. - -The primary things collected include: - - - Aggregate counts (smallest, average, largest, total) of nodes per-cluster and their size (e.g. CPU cores & RAM). - - Aggregate counts of logical resources like Clusters, Projects, Namespaces, and Pods. - - Counts of what driver was used to deploy clusters and nodes (e.g. GKE vs EC2 vs Imported vs Custom). - - Versions of Kubernetes components, Operating Systems and Docker that are deployed on nodes. - - Whether some optional components are enabled or not (e.g. which auth providers are used). - - The image name & version of Rancher that is running. - - A unique randomly-generated identifier for this installation. - -### Can I see the information that is being sent? - -If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. - -If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. - -### How do I turn it on or off? 
- -After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md b/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md deleted file mode 100644 index e0aa7ff6a..000000000 --- a/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Questions about Upgrading to Rancher v2.x -weight: 1 ---- - -This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x. - -# Kubernetes - -**What does it mean when you say Rancher v2.x is built on Kubernetes?** - -Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. - -
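-For example, on a cluster where Rancher is installed, you can inspect these custom resources with `kubectl`; the exact set of CRDs varies by Rancher version:
-
-```
-# List the CustomResourceDefinitions that Rancher adds to the cluster
-$ kubectl get crd | grep cattle.io
-
-# Rancher clusters and projects are themselves custom resources
-$ kubectl get clusters.management.cattle.io
-$ kubectl get projects.management.cattle.io --all-namespaces
-```
-
-<br/>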
- -**Do you plan to implement upstream Kubernetes, or continue to work on your own fork?** - -We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. - -
- -**Does this release mean that we need to re-train our support staff in Kubernetes?** - -Yes. Rancher will offer native Kubernetes functionality via `kubectl`, but it will also offer its own UI dashboard that lets you deploy Kubernetes workloads without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend learning it. We plan to improve our UX in subsequent releases to make Kubernetes easier to use. - -<br/>
- -**Is a Rancher Compose file going to create a Kubernetes pod? Do we have to learn both now? We usually work with files on the filesystem, not the UI.** - -No. Unfortunately, the differences were significant enough that we cannot support Rancher Compose in v2.x. We will provide both a tool and guides to help with this migration. - -<br/>
- -**If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure?** - -Absolutely. Native Kubernetes YAML files will work as expected; you do not need Rancher or Docker Compose files. - -# Cattle - -**How does Rancher v2.x affect Cattle?** - -Cattle will not be supported in v2.x, as Rancher has been re-architected to be based on Kubernetes. You can, however, expect that the majority of the Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. - -<br/>
- -**Can I migrate existing Cattle workloads into Kubernetes?** - -Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. - -# Feature Changes - -**Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x?** - -Yes. You can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. - -
- -**Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC?** - -The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. - -
- -**Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x?** - -Yes. You can do so by leveraging Kubernetes' network policies. - -
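-For example, a minimal policy that only allows pods labeled `app=frontend` to reach pods labeled `app=backend` could look like the sketch below (the labels and namespace are illustrative, and the cluster's network plugin must support network policies):
-
-```
-kubectl apply -f - <<EOF
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: backend-allow-frontend
-  namespace: default
-spec:
-  podSelector:
-    matchLabels:
-      app: backend
-  policyTypes:
-  - Ingress
-  ingress:
-  - from:
-    - podSelector:
-        matchLabels:
-          app: frontend
-EOF
-```
-
-<br/>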
- -**What about the CLI? Will that work the same way with the same features?** - -Yes. Definitely. - -# Environments & Clusters - -**Can I still create templates for environments and clusters?** - -Starting with v2.0, the concept of an environment has been replaced by the Kubernetes cluster, since only the Kubernetes orchestration engine is supported going forward. - -Kubernetes RKE Templates are on our roadmap for v2.x. Please refer to our release notes and documentation for all the features that we currently support. - -<br/>
- -**Can you still add an existing host to an environment? (i.e. not provisioned directly from Rancher)** - -Yes. You can still register existing hosts in the same way, by running the Rancher agent directly on them. - -# Upgrading/Migrating - -**How would the migration from v1.x to v2.x work?** - -Due to the technical difficulty of transforming a Docker container into a pod running on Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. - -<br/>
- -**Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters?** - -At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. - -# Support - -**Are you planning some long-term support releases for Rancher v1.6?** - -That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it. New releases of the v1.6 stream are announced in the [Rancher forums.](https://forums.rancher.com/c/announcements) The Rancher wiki contains the [v1.6 release notes.](https://github.com/rancher/rancher/wiki/Rancher-1.6) \ No newline at end of file diff --git a/content/rancher/v2.x/en/helm-charts/_index.md b/content/rancher/v2.x/en/helm-charts/_index.md deleted file mode 100644 index d517e9e4f..000000000 --- a/content/rancher/v2.x/en/helm-charts/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Helm Charts in Rancher -weight: 12 ---- - -### Apps and Marketplace - -In Rancher v2.5, the [apps and marketplace feature](./apps-marketplace) is used to manage Helm charts, replacing the catalog system. - -### Catalogs - -In Rancher before v2.5, the [catalog system](./legacy-catalogs) was used to manage Helm charts. \ No newline at end of file diff --git a/content/rancher/v2.x/en/helm-charts/apps-marketplace/_index.md b/content/rancher/v2.x/en/helm-charts/apps-marketplace/_index.md deleted file mode 100644 index f2bc4b50a..000000000 --- a/content/rancher/v2.x/en/helm-charts/apps-marketplace/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Apps and Marketplace -shortTitle: Rancher v2.5 -weight: 1 ---- - -_Available as of v2.5_ - -In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. - -In the cluster manager Rancher uses a catalog system to import bundles of charts and then uses those charts to either deploy custom helm applications or Rancher's tools such as Monitoring or Istio. Now in the Cluster Explorer, Rancher uses a similar but simplified version of the same system. Repositories can be added in the same way that catalogs were, but are specific to the current cluster. Rancher tools come as pre-loaded repositories which deploy as standalone helm charts. - -### Charts - -From the top-left menu select _"Apps & Marketplace"_ and you will be taken to the Charts page. - -The charts page contains all Rancher, Partner, and Custom Charts. - -* Rancher tools such as Logging or Monitoring are included under the Rancher label -* Partner charts reside under the Partners label -* Custom charts will show up under the name of the repository - -All three types are deployed and managed in the same way. - -> Apps managed by the Cluster Manager should continue to be managed only by the Cluster Manager, and apps managed with the Cluster Explorer must be managed only by the Cluster Explorer. - -### Repositories - -From the left sidebar select _"Repositories"_. - -These items represent helm repositories, and can be either traditional helm endpoints which have an index.yaml, or git repositories which will be cloned and can point to a specific branch. In order to use custom charts, simply add your repository here and they will become available in the Charts tab under the name of the repository. - - -### Helm Compatibility - -The Cluster Explorer only supports Helm 3 compatible charts. 
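-A quick way to check whether a given chart targets Helm 3 is to inspect its `apiVersion` with the Helm 3 CLI: charts that set `apiVersion: v2` in `Chart.yaml` are Helm 3-only charts, while `apiVersion: v1` charts were written for Helm 2 but can generally still be installed with Helm 3 (the repository and chart names below are placeholders):
-
-```
-$ helm show chart myrepo/mychart | grep apiVersion
-```
-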
- - -### Deployment and Upgrades - -From the _"Charts"_ tab select a Chart to install. Rancher and Partner charts may have extra configurations available through custom pages or questions.yaml files, but all chart installations can modify the values.yaml and other basic settings. Once you click install, a Helm operation job is deployed, and the console for the job is displayed. - -To view all recent changes, go to the _"Recent Operations"_ tab. From there you can view the call that was made, conditions, events, and logs. - -After installing a chart, you can find it in the _"Installed Apps"_ tab. In this section you can upgrade or delete the installation, and see further details. When choosing to upgrade, the form and values presented will be the same as installation. - -Most Rancher tools have additional pages located in the toolbar below the _"Apps & Marketplace"_ section to help manage and use the features. These pages include links to dashboards, forms to easily add Custom Resources, and additional information. diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/_index.md deleted file mode 100644 index c8606dfc4..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Rancher v2.0-v2.4 Catalogs (Deprecated) -shortTitle: Rancher v2.0-v2.4 -description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. -weight: 1 -aliases: - - /rancher/v2.x/en/concepts/global-configuration/catalog/ - - /rancher/v2.x/en/concepts/catalogs/ - - /rancher/v2.x/en/tasks/global-configuration/catalog/ - - /rancher/v2.x/en/catalog - - /rancher/v2.x/en/catalog/apps ---- - -> As of Rancher v2.5, the catalog system is deprecated and has been replaced with [Apps and Marketplace]({{}}/rancher/v2.x/en/helm-charts/apps-marketplace) in the Cluster Explorer. - -Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. - -- **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. -- **Helm charts** are a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. - -Rancher improves on Helm catalogs and charts. All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. - -This section covers the following topics: - -- [Catalog scopes](#catalog-scopes) -- [Catalog Helm Deployment Versions](#catalog-helm-deployment-versions) -- [When to use Helm 3](#when-to-use-helm-3) -- [Helm 3 Backwards Compatibility](#helm-3-backwards-compatibility) -- [Built-in global catalogs](#built-in-global-catalogs) -- [Custom catalogs](#custom-catalogs) -- [Creating and launching applications](#creating-and-launching-applications) -- [Chart compatibility with Rancher](#chart-compatibility-with-rancher) -- [Global DNS](#global-dns) - -# Catalog Scopes - -Within Rancher, you can manage catalogs at three different scopes. Global catalogs are shared across all clusters and project. 
There are some use cases where you might not want to share catalogs between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. - -Scope | Description | Available As of | ---- | --- | --- | -Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | -Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | -Project | This specific cluster can access the Helm charts in this catalog | v2.2.0 | - -# Catalog Helm Deployment Versions - -_Applicable as of v2.4.0_ - -In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. - -When you create a custom catalog, you will have to configure the catalog to use either Helm 2 or Helm 3. This version cannot be changed later. If the catalog is added with the wrong Helm version, it will need to be deleted and re-added. - -When you launch a new app from a catalog, the app will be managed by the catalog's Helm version. A Helm 2 catalog will use Helm 2 to manage all of the apps, and a Helm 3 catalog will use Helm 3 to manage all apps. - -By default, catalogs are assumed to be deployed using Helm 2. If you run an app in Rancher before v2.4.0, then upgrade to Rancher v2.4.0+, the app will still be managed by Helm 2. If the app was already using a Helm 3 Chart (API version 2) it will no longer work in v2.4.0+. You must either downgrade the chart's API version or recreate the catalog to use Helm 3. - -Charts that are specific to Helm 2 should only be added to a Helm 2 catalog, and Helm 3 specific charts should only be added to a Helm 3 catalog. - -# When to use Helm 3 - -_Applicable as of v2.4.0_ - -- If you want to ensure that the security permissions are being pulled from the kubeconfig file -- If you want to utilize apiVersion `v2` features such as creating a library chart to reduce code duplication, or moving your requirements from the `requirements.yaml` into the `Chart.yaml` - -Overall Helm 3 is a movement towards a more standardized Kubernetes feel. As the Kubernetes community has evolved, standards and best practices have as well. Helm 3 is an attempt to adopt those practices and streamline how charts are maintained. - -# Helm 3 Backwards Compatibility - -_Applicable as of v2.4.0_ - -With the use of the OpenAPI schema to validate your rendered templates in Helm 3, you will find charts that worked in Helm 2 may not work in Helm 3. This will require you to update your chart templates to meet the new validation requirements. This is one of the main reasons support for Helm 2 and Helm 3 was provided starting in Rancher 2.4.x, as not all charts can be deployed immediately in Helm 3. - -Helm 3 does not create a namespace for you, so you will have to provide an existing one. 
This can cause issues if you have integrated code with Helm 2, as you will need to make code changes to ensure a namespace is being created and passed in for Helm 3. Rancher will continue to manage namespaces for Helm to ensure this does not impact your app deployment. - -apiVersion `v2` is now reserved for Helm 3 charts. This apiVersion enforcement could cause issues as older versions of Helm 2 did not validate the apiVersion in the `Chart.yaml` file. In general, your Helm 2 chart’s apiVersion should be set to `v1` and your Helm 3 chart’s apiVersion should be set to `v2`. You can install charts with apiVersion `v1` with Helm 3, but you cannot install `v2` charts into Helm 2. - -# Built-in Global Catalogs - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. For details, refer to the section on managing [built-in global catalogs.]({{}}/rancher/v2.x/en/catalog/built-in) - -# Custom Catalogs - -There are two types of catalogs in Rancher: [Built-in global catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) and [custom catalogs.]({{}}/rancher/v2.x/en/catalog/adding-catalogs/) - -Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. For details, refer to the [section on adding custom catalogs]({{}}/rancher/v2.x/en/catalog/adding-catalogs) and the [catalog configuration reference.]({{}}/rancher/v2.x/en/catalog/catalog-config) - -# Creating and Launching Applications - -In Rancher, applications are deployed from the templates in a catalog. This section covers the following topics: - -* [Multi-cluster applications]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) -* [Creating catalog apps]({{}}/rancher/v2.x/en/catalog/creating-apps) -* [Launching catalog apps within a project]({{}}/rancher/v2.x/en/catalog/launching-apps) -* [Managing catalog apps]({{}}/rancher/v2.x/en/catalog/managing-apps) -* [Tutorial: Example custom chart creation]({{}}/rancher/v2.x/en/catalog/tutorial) - -# Chart Compatibility with Rancher - -Charts now support the fields `rancher_min_version` and `rancher_max_version` in the [`questions.yml` file](https://github.com/rancher/integration-test-charts/blob/master/charts/chartmuseum/v1.6.0/questions.yml) to specify the versions of Rancher that the chart is compatible with. When using the UI, only app versions that are valid for the version of Rancher running will be shown. API validation is done to ensure apps that don't meet the Rancher requirements cannot be launched. An app that is already running will not be affected on a Rancher upgrade if the newer Rancher version does not meet the app's requirements. - -# Global DNS - -_Available as v2.2.0_ - -When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. - -For more information on how to use this feature, see [Global DNS]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/). 
diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/_index.md deleted file mode 100644 index 917b9f5f5..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Creating Custom Catalogs -weight: 200 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/catalog/adding-custom-catalogs/ - - /rancher/v2.x/en/catalog/custom/adding - - /rancher/v2.x/en/catalog/adding-catalogs - - /rancher/v2.x/en/catalog/custom/ ---- - -Custom catalogs can be added into Rancher at a global scope, cluster scope, or project scope. - -- [Adding catalog repositories](#adding-catalog-repositories) - - [Add custom Git repositories](#add-custom-git-repositories) - - [Add custom Helm chart repositories](#add-custom-helm-chart-repositories) - - [Add private Git/Helm chart repositories](#add-private-git-helm-chart-repositories) -- [Adding global catalogs](#adding-global-catalogs) -- [Adding cluster level catalogs](#adding-cluster-level-catalogs) -- [Adding project level catalogs](#adding-project-level-catalogs) -- [Custom catalog configuration reference](#custom-catalog-configuration-reference) - -# Adding Catalog Repositories - -Adding a catalog is as simple as adding a catalog name, a URL and a branch name. - -**Prerequisite:** An [admin]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) of Rancher has the ability to add or remove catalogs globally in Rancher. - -### Add Custom Git Repositories -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. - -### Add Custom Helm Chart Repositories - -A Helm chart repository is an HTTP server that houses one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with built-in package server for developer testing (helm serve). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. - -### Add Private Git/Helm Chart Repositories -_Available as of v2.2.0_ - -Private catalog repositories can be added using credentials like Username and Password. You may also want to use the OAuth token if your Git or Helm repository server supports that. - -For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.]({{}}/rancher/v2.x/en/catalog/catalog-config) - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form and click **Create**. - - **Result:** Your catalog is added to Rancher. 
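-For the custom Helm chart repository case, any static file server works. The following is a minimal sketch of packaging a chart and generating the repository index with the Helm CLI (the chart name and URL are illustrative):
-
-```
-$ helm package ./mychart                              # produces a packaged chart archive (.tgz)
-$ helm repo index . --url https://charts.example.com  # writes the index.yaml that Rancher reads
-$ python3 -m http.server 8080                         # any web server that serves these files will do
-```
-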
- -# Adding Global Catalogs - ->**Prerequisites:** In order to manage the [built-in catalogs]({{}}/rancher/v2.x/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -4. Click **Create**. - - **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or [applications in any project]({{}}/rancher/v2.x/en/catalog/launching-apps/) from this catalog. - -# Adding Cluster Level Catalogs - -_Available as of v2.2.0_ - ->**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Custom Cluster Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. - -1. From the **Global** view, navigate to your cluster that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Cluster** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. 
- -# Adding Project Level Catalogs - -_Available as of v2.2.0_ - ->**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Project Owner Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) ->- [Custom Project Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. - -1. From the **Global** view, navigate to your project that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Project** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{}}/rancher/v2.x/en/catalog/apps/) from this catalog. - -# Custom Catalog Configuration Reference - -Refer to [this page]({{}}/rancher/v2.x/en/catalog/catalog-config) more information on configuring custom catalogs. \ No newline at end of file diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/_index.md deleted file mode 100644 index 43b8c332f..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Enabling and Disabling Built-in Global Catalogs -weight: 100 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/catalog/enabling-default-catalogs/ - - /rancher/v2.x/en/catalog/built-in ---- - -There are default global catalogs packaged as part of Rancher. - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. - ->**Prerequisites:** In order to manage the built-in catalogs or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions-reference) role assigned. - -1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - -2. Toggle the default catalogs that you want to be enabled or disabled: - - - **Library:** The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. 
This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/#rancher-charts) over native Helm charts. - - **Helm Stable:** This catalog, which is maintained by the Kubernetes community, includes native [Helm charts](https://helm.sh/docs/chart_template_guide/). This catalog features the largest pool of apps. - - **Helm Incubator:** Similar in user experience to Helm Stable, but this catalog is filled with applications in **beta**. - - **Result**: The chosen catalogs are enabled. Wait a few minutes for Rancher to replicate the catalog charts. When replication completes, you'll be able to see them in any of your projects by selecting **Apps** from the main navigation bar. In versions before v2.2.0, within a project, you can select **Catalog Apps** from the main navigation bar. diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/_index.md deleted file mode 100644 index 37b66da90..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Custom Catalog Configuration Reference -weight: 300 -aliases: - - /rancher/v2.x/en/catalog/catalog-config - - /rancher/v2.x/en/catalog/catalog-config ---- - -Any user can create custom catalogs to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. - -- [Types of Repositories](#types-of-repositories) -- [Custom Git Repository](#custom-git-repository) -- [Custom Helm Chart Repository](#custom-helm-chart-repository) -- [Catalog Fields](#catalog-fields) -- [Private Repositories](#private-repositories) - - [Using Username and Password](#using-username-and-password) - - [Using an OAuth token](#using-an-oauth-token) - -# Types of Repositories - -Rancher supports adding in different types of repositories as a catalog: - -* Custom Git Repository -* Custom Helm Chart Repository - -# Custom Git Repository - -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will default to use the `master` branch. Whenever you add a catalog to Rancher, it will be available almost immediately. - -# Custom Helm Chart Repository - -A Helm chart repository is an HTTP server that contains one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with a built-in package server for developer testing (`helm serve`). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. 
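-Before adding either type of repository as a catalog, it can help to confirm that it is reachable from outside Rancher: a Git catalog URL must be cloneable at the chosen branch, and a Helm chart repository must serve an `index.yaml`. For example (the URLs below are placeholders):
-
-```
-# Git repository: list the branch to confirm the URL and branch are reachable
-$ git ls-remote https://github.com/example-org/example-catalog.git master
-
-# Helm chart repository: confirm that the repository serves an index.yaml
-$ curl -sf https://charts.example.com/index.yaml | head
-```
-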
- -# Catalog Fields - -When [adding your catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/) to Rancher, you'll provide the following information: - - -| Variable | Description | -| -------------------- | ------------- | -| Name | Name for your custom catalog to distinguish the repositories in Rancher | -| Catalog URL | URL of your custom chart repository| -| Use Private Catalog | Selected if you are using a private repository that requires authentication | -| Username (Optional) | Username or OAuth Token | -| Password (Optional) | If you are authenticating using a username, enter the associated password. If you are using an OAuth token, use `x-oauth-basic`. | -| Branch | For a Git repository, the branch name. Default: `master`. For a Helm Chart repository, this field is ignored. | -| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. For more information, refer to the [section on Helm versions.]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) | - -# Private Repositories - -_Available as of v2.2.0_ - -Private Git or Helm chart repositories can be added into Rancher using either credentials, i.e. `Username` and `Password`. Private Git repositories also support authentication using OAuth tokens. - -### Using Username and Password - -1. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -2. Provide the `Username` and `Password` for your Git or Helm repository. - -### Using an OAuth token - -Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/) for more details on how OAuth authentication works. - -1. Create an [OAuth token](https://github.com/settings/tokens) -with `repo` permission selected, and click **Generate token**. - -2. When [adding the catalog]({{}}/rancher/v2.x/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -3. For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/_index.md deleted file mode 100644 index 63cec48c0..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/_index.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Creating Catalog Apps -weight: 400 -aliases: - - /rancher/v2.x/en/tasks/global-configuration/catalog/customizing-charts/ - - /rancher/v2.x/en/catalog/custom/creating - - /rancher/v2.x/en/catalog/custom - - /rancher/v2.x/en/catalog/creating-apps ---- - -Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. - -> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. 
- -- [Chart types](#chart-types) - - [Helm charts](#helm-charts) - - [Rancher charts](#rancher-charts) -- [Chart directory structure](#chart-directory-structure) -- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) - - [questions.yml](#questions-yml) - - [Min/Max Rancher versions](#min-max-rancher-versions) - - [Question variable reference](#question-variable-reference) -- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) - -# Chart Types - -Rancher supports two different types of charts: Helm charts and Rancher charts. - -### Helm Charts - -Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you'll learn the chart's parameters and then configure them using **Answers**, which are sets of key value pairs. - -The Helm Stable and Helm Incubators are populated with native Helm charts. However, you can also use native Helm charts in Custom catalogs (although we recommend Rancher Charts). - -### Rancher Charts - -Rancher charts mirror native helm charts, although they add two files that enhance user experience: `app-readme.md` and `questions.yaml`. Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) - -Advantages of Rancher charts include: - -- **Enhanced revision tracking:** While Helm supports versioned deployments, Rancher adds tracking and revision history to display changes between different versions of the chart. -- **Streamlined application launch:** Rancher charts add simplified chart descriptions and configuration forms to make catalog application deployment easy. Rancher users need not read through the entire list of Helm variables to understand how to launch an application. -- **Application resource management:** Rancher tracks all the resources created by a specific application. Users can easily navigate to and troubleshoot on a page listing all the workload objects used to power an application. - -# Chart Directory Structure - -The following table demonstrates the directory structure for a Rancher Chart. The `charts` directory is the top level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. This information is helpful when customizing charts for a custom catalog. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. - -``` -/ - │ - ├── charts/ - │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name - │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI - │ │ │ ├── Chart.yaml # Required Helm chart information file. - │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* - │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. - │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. - │ │ │ ├── values.yml # Default configuration values for the chart. - │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. -``` - -# Additional Files for Rancher Charts - -Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. 
Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. - -- `app-readme.md` - - A file that provides descriptive text in the chart's UI header. The following image displays the difference between a Rancher chart (which includes `app-readme.md`) and a native Helm chart (which does not). - -
Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
- - ![app-readme.md]({{}}/img/rancher/app-readme.png) - -- `questions.yml` - - A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using key value pairs, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). - - -
Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
- - ![questions.yml]({{}}/img/rancher/questions.png) - - -### questions.yml - -Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. - -### Min/Max Rancher versions - -_Available as of v2.3.0_ - -For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. - -> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. - -``` -rancher_min_version: 2.3.0 -rancher_max_version: 2.3.99 -``` - -### Question Variable Reference - -This reference contains variables that you can use in `questions.yml` nested under `questions:`. - -| Variable | Type | Required | Description | -| ------------- | ------------- | --- |------------- | -| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | -| label | string | true | Define the UI label. | -| description | string | false | Specify the description of the variable.| -| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| -| required | bool | false | Define if the variable is required or not (true \| false)| -| default | string | false | Specify the default value. | -| group | string | false | Group questions by input value. | -| min_length | int | false | Min character length.| -| max_length | int | false | Max character length.| -| min | int | false | Min integer length. | -| max | int | false | Max integer length. | -| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"| -| valid_chars | string | false | Regular expression for input chars validation. | -| invalid_chars | string | false | Regular expression for invalid input chars validation.| -| subquestions | []subquestion | false| Add an array of subquestions.| -| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | -| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| - ->**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. - -# Tutorial: Example Custom Chart Creation - -For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.]({{}}/rancher/v2.x/en/catalog/tutorial) diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/_index.md deleted file mode 100644 index 798d3e955..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Global DNS -weight: 5010 -aliases: - - /rancher/v2.x/en/catalog/globaldns ---- - -_Available as of v2.2.0_ - -Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. - -> **Note:** Global DNS is only available in [Kubernetes installations]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) with the `local` cluster enabled. - -- [Global DNS Providers](#global-dns-providers) -- [Global-DNS-Entries](#global-dns-entries) -- [Permissions for Global DNS Providers and Entries](#permissions-for-global-dns-providers-and-entries) -- [Setting up Global DNS for Applications](#setting-up-global-dns-for-applications) -- [Adding a Global DNS Entry](#adding-a-global-dns-entry) -- [Editing a Global DNS Provider](#editing-a-global-dns-provider) -- [Global DNS Entry Configuration](#global-dns-entry-configuration) -- [DNS Provider Configuration](#dns-provider-configuration) - - [Route53](#route53) - - [CloudFlare](#cloudflare) - - [AliDNS](#alidns) -- [Adding Annotations to Ingresses to program the External DNS](#adding-annotations-to-ingresses-to-program-the-external-dns) - -# Global DNS Providers - -Before adding in Global DNS entries, you will need to configure access to an external provider. - -The following table lists the first version of Rancher each provider debuted. - -| DNS Provider | Available as of | -| --- | --- | -| [AWS Route53](https://aws.amazon.com/route53/) | v2.2.0 | -| [CloudFlare](https://www.cloudflare.com/dns/) | v2.2.0 | -| [AliDNS](https://www.alibabacloud.com/product/dns) | v2.2.0 | - -# Global DNS Entries - -For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or to specific projects. 
You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. - -# Permissions for Global DNS Providers and Entries - -By default, only [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. - -# Setting up Global DNS for Applications - -1. From the **Global View**, select **Tools > Global DNS Providers**. -1. To add a provider, choose from the available provider options and configure the Global DNS Provider with necessary credentials and an optional domain. For help, see [DNS Provider Configuration.](#dns-provider-configuration) -1. (Optional) Add additional users so they could use the provider when creating Global DNS entries as well as manage the Global DNS provider. -1. (Optional) Pass any custom values in the Additional Options section. - -# Adding a Global DNS Entry - -1. From the **Global View**, select **Tools > Global DNS Entries**. -1. Click on **Add DNS Entry**. -1. Fill out the form. For help, refer to [Global DNS Entry Configuration.](#global-dns-entry-configuration) -1. Click **Create.** - -# Editing a Global DNS Provider - -The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: - -- Root Domain -- Access Key & Secret Key -- Members -- Custom values - -1. From the **Global View**, select **Tools > Global DNS Providers**. - -1. For the Global DNS provider that you want to edit, click the **⋮ > Edit**. - -# Editing a Global DNS Entry - -The [global administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: - -- FQDN -- Global DNS Provider -- Target Projects or Multi-Cluster App -- DNS TTL -- Members - -Any users who can access the Global DNS entry can **only** add target projects that they have access to. However, users can remove **any** target project as there is no check to confirm if that user has access to the target project. - -Permission checks are relaxed for removing target projects in order to support situations where the user's permissions might have changed before they were able to delete the target project. Another use case could be that the target project was removed from the cluster before being removed from a target project of the Global DNS entry. - -1. From the **Global View**, select **Tools > Global DNS Entries**. - -1. For the Global DNS entry that you want to edit, click the **⋮ > Edit**. - - -# Global DNS Entry Configuration - -| Field | Description | -|----------|--------------------| -| FQDN | Enter the **FQDN** you wish to program on the external DNS. | -| Provider | Select a Global DNS **Provider** from the list. 
| -| Resolves To | Select if this DNS entry will be for a [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). | -| Multi-Cluster App Target | The target for the global DNS entry. You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. | -| DNS TTL | Configure the DNS time to live value in seconds. By default, it will be 300 seconds. | -| Member Access | Search for any users that you want to have the ability to manage this Global DNS entry. | - -# DNS Provider Configuration - -### Route53 - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | (Optional) Enter the **Root Domain** of the hosted zone on AWS Route53. If this is not provided, Rancher's Global DNS Provider will work with all hosted zones that the AWS keys can access. | -| Credential Path | The [AWS credential path.](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-where) | -| Role ARN | An [Amazon Resource Name.](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) | -| Region | An [AWS region.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) | -| Zone | An [AWS zone.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.AvailabilityZones) | -| Access Key | Enter the AWS **Access Key**. | -| Secret Key | Enter the AWS **Secret Key**. | -| Member Access | Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - - -### CloudFlare - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. | -| Proxy Setting | When set to yes, the global DNS entry that gets created for the provider has proxy settings on. | -| API Email | Enter the CloudFlare **API Email**. | -| API Key | Enter the CloudFlare **API Key**. | -| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - -### AliDNS - ->**Notes:** -> ->- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running `local` cluster, and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. ->- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. 
| -| Access Key | Enter the **Access Key**. | -| Secret Key | Enter the **Secret Key**. | -| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - -# Adding Annotations to Ingresses to program the External DNS - -In order for Global DNS entries to be programmed, you will need to add a specific annotation on an ingress in your application or target project. - -For any application that you want targeted for your Global DNS entry, find an ingress associated with the application. - -This ingress needs to use a specific `hostname` and an annotation that should match the FQDN of the Global DNS entry. - -In order for the DNS to be programmed, the following requirements must be met: - -* The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. -* The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. - -Once the ingress in your [multi-cluster application]({{}}/rancher/v2.x/en/catalog/multi-cluster-apps/) or in your target projects is in an `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. \ No newline at end of file diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/_index.md deleted file mode 100644 index ce8e22b1b..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Launching Catalog Apps -weight: 700 -aliases: - - /rancher/v2.x/en/catalog/launching-apps ---- - -Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/#catalog-scopes). - -If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/). - -- [Prerequisites](#prerequisites) -- [Launching a catalog app](#launching-a-catalog-app) -- [Configuration options](#configuration-options) - -# Prerequisites - -When Rancher deploys a catalog app, it launches an ephemeral instance of a Helm service account that has the permissions of the user deploying the catalog app. Therefore, a user cannot gain more access to the cluster through Helm or a catalog application than they otherwise would have. - -To launch an app from a catalog in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster, which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the cluster that include the target project - -Before launching an app, you'll need to either [enable a built-in global catalog]({{}}/rancher/v2.x/en/catalog/built-in) or [add your own custom catalog.]({{}}/rancher/v2.x/en/catalog/adding-catalogs) - -# Launching a Catalog App - -1. From the **Global** view, open the project that you want to deploy an app to. - -2. From the main navigation bar, choose **Apps**. 
In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the app that you want to launch, and then click **View Now**. - -4. Under **Configuration Options** enter a **Name**. By default, this name is also used to create a Kubernetes namespace for the application. - - * If you would like to change the **Namespace**, click **Customize** and enter a new name. - * If you want to use a different namespace that already exists, click **Customize**, and then click **Use an existing namespace**. Choose a namespace from the list. - -5. Select a **Template Version**. - -6. Complete the rest of the **Configuration Options**. - - * For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs), answers are provided as key value pairs in the **Answers** section. - * Keys and values are available within **Detailed Descriptions**. - * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. For example, when entering an answer that includes two values separated by a comma (i.e., `abc, bcd`), wrap the values with double quotes (i.e., `"abc, bcd"`). - -7. Review the files in **Preview**. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's **Workloads** view or **Apps** view. In versions before v2.2.0, this is the **Catalog Apps** view. - -# Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -{{% tabs %}} -{{% tab "UI" %}} - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -{{% /tab %}} -{{% tab "Editing YAML Files" %}} - -_Available as of v2.1.0_ - -If you do not want to input answers using the UI, you can choose the **Edit as YAML** option. - -With this example YAML: - -```YAML -outer: - inner: value -servers: -- port: 80 - host: example -``` - -### Key Value Pairs - -You can have a YAML file that translates these fields to match how to [format custom values so that it can be used with `--set`](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set). 
- -These values would be translated to: - -``` -outer.inner=value -servers[0].port=80 -servers[0].host=example -``` - -### YAML files - -_Available as of v2.2.0_ - -You can directly paste that YAML-formatted structure into the YAML editor. Because custom values can be set using a YAML-formatted structure, Rancher can easily accept more complicated input values (e.g., multi-line strings, arrays, and JSON objects). -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/_index.md deleted file mode 100644 index 5873a303d..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Managing Catalog Apps -weight: 500 -aliases: - - /rancher/v2.x/en/catalog/managing-apps --- - -After deploying an application, one of the benefits of using an application rather than individual workloads/resources is the ease of managing many workloads and resources as a single unit. Apps can be cloned, upgraded, or rolled back. - -- [Cloning catalog applications](#cloning-catalog-applications) -- [Upgrading catalog applications](#upgrading-catalog-applications) -- [Rolling back catalog applications](#rolling-back-catalog-applications) -- [Deleting catalog application deployments](#deleting-catalog-application-deployments) - -### Cloning Catalog Applications - -After an application is deployed, you can easily clone it to create another application with almost the same configuration. It saves you the work of manually filling in duplicate information. - -### Upgrading Catalog Applications - -After an application is deployed, you can easily upgrade it to a different template version. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to upgrade. - -2. From the main navigation bar, choose **Apps**. In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the application that you want to upgrade, and then click the ⋮ to find **Upgrade**. - -4. Select the **Template Version** that you want to deploy. - -5. (Optional) Update your **Configuration Options**. - -6. (Optional) Select whether or not you want to force the catalog application to be upgraded by checking the box for **Delete and recreate resources if needed during the upgrade**. - - > In Kubernetes, some fields are designed to be immutable or cannot be updated directly. As of v2.2.0, you can force your catalog application to be updated regardless of these fields. This will cause the catalog apps to be deleted and resources to be re-created if needed during the upgrade. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is updated. You can view the application status from the project's: - -- **Workloads** view -- **Apps** view. In versions before v2.2.0, this is the **Catalog Apps** view. - - -### Rolling Back Catalog Applications - -After an application has been upgraded, you can easily roll back to a different template version. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to roll back. - -2. From the main navigation bar, choose **Apps**. In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. 
Find the application that you want to rollback, and then click the ⋮ to find **Rollback**. - -4. Select the **Revision** that you want to roll back to. By default, Rancher saves up to the last 10 revisions. - -5. (Optional) Select whether or not you want to force the catalog application to be upgraded by checking the box for **Delete and recreate resources if needed during the upgrade**. - - > In Kubernetes, some fields are designed to be immutable or cannot be updated directly. As of v2.2.0, you can now force your catalog application to be updated regardless of these fields. This will cause the catalog apps to be deleted and resources to be re-created if needed during the rollback. - -7. Click **Rollback**. - -**Result**: Your application is updated. You can view the application status from the project's: - -- **Workloads** view -- **Apps** view. In versions before v2.2.0, this is the **Catalog Apps** view. - -### Deleting Catalog Application Deployments - -As a safeguard to prevent you from unintentionally deleting other catalog applications that share a namespace, deleting catalog applications themselves does not delete the namespace they're assigned to. - -Therefore, if you want to delete both an app and the namespace that contains the app, you should remove the app and the namespace separately: - -1. Uninstall the app using the app's `uninstall` function. - -1. From the **Global** view, navigate to the project that contains the catalog application that you want to delete. - -1. From the main menu, choose **Namespaces**. - -1. Find the namespace running your catalog app. Select it and click **Delete**. - -**Result:** The catalog application deployment and its namespace are deleted. diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/_index.md deleted file mode 100644 index 91cb1f44f..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Multi-Cluster Apps -weight: 600 -aliases: - - /rancher/v2.x/en/catalog/multi-cluster-apps ---- -_Available as of v2.2.0_ - -The documentation about multi-cluster apps has moved [here.]({{}}/rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps) diff --git a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/_index.md b/content/rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/_index.md deleted file mode 100644 index 18155cd69..000000000 --- a/content/rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Tutorial: Example Custom Chart Creation" -weight: 800 -aliases: - - /rancher/v2.x/en/catalog/tutorial ---- - -In this tutorial, you'll learn how to create a Helm chart and deploy it to a repository. The repository can then be used as a source for a custom catalog in Rancher. - -You can fill your custom catalogs with either Helm Charts or Rancher Charts, although we recommend Rancher Charts due to their enhanced user experience. - -> For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). - -1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in [Chart Directory Structure]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/#chart-directory-structure). 
- - Rancher requires this directory structure, although `app-readme.md` and `questions.yml` are optional. - - >**Tip:** - > - >- To begin customizing a chart, copy one from either the [Rancher Library](https://github.com/rancher/charts) or the [Helm Stable](https://github.com/kubernetes/charts/tree/master/stable). - >- For a complete walk through of developing charts, see the upstream Helm chart [developer reference](https://docs.helm.sh/developing_charts/). - -2. **Recommended:** Create an `app-readme.md` file. - - Use this file to create custom text for your chart's header in the Rancher UI. You can use this text to notify users that the chart is customized for your environment or provide special instruction on how to use it. -
-
- **Example**: - - ``` - $ cat ./app-readme.md - - # Wordpress ROCKS! - ``` - -3. **Recommended:** Create a `questions.yml` file. - - This file creates a form for users to specify deployment parameters when they deploy the custom chart. Without this file, users **must** specify the parameters manually using key value pairs, which isn't user-friendly. -
-
- The example below creates a form that prompts users for persistent volume size and a storage class. -
-
- For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/#question-variable-reference). - - ```yaml - categories: - - Blog - - CMS - questions: - - variable: persistence.enabled - default: "false" - description: "Enable persistent volume for WordPress" - type: boolean - required: true - label: WordPress Persistent Volume Enabled - show_subquestion_if: true - group: "WordPress Settings" - subquestions: - - variable: persistence.size - default: "10Gi" - description: "WordPress Persistent Volume Size" - type: string - label: WordPress Volume Size - - variable: persistence.storageClass - default: "" - description: "If undefined or null, uses the default StorageClass. Default to null" - type: storageclass - label: Default StorageClass for WordPress - ``` - -4. Check the customized chart into your GitHub repo. - -**Result:** Your custom chart is added to the repo. Your Rancher Server will replicate the chart within a few minutes. diff --git a/content/rancher/v2.x/en/installation/_index.md b/content/rancher/v2.x/en/installation/_index.md deleted file mode 100644 index 9b88fd204..000000000 --- a/content/rancher/v2.x/en/installation/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Installing/Upgrading Rancher -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 3 -aliases: - - /rancher/v2.x/en/installation/how-ha-works/ ---- - -This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. - -# Terminology - -In this section, - -- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. -- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. -- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. As of Rancher v2.4, Rancher can be installed on a K3s cluster. -- **RKE2** is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. -- **RancherD** is a new tool for installing Rancher, which is available as of Rancher v2.5.4. It is an experimental feature. RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. - -# Changes to Installation in Rancher v2.5 - -In Rancher v2.5, the Rancher management server can be installed on any Kubernetes cluster, including hosted clusters, such as Amazon EKS clusters. - -For Docker installations, a local Kubernetes cluster is installed in the single Docker container, and Rancher is installed on the local cluster. - -The `restrictedAdmin` Helm chart option was added. When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. 
For more information, see the section about the [restricted-admin role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#restricted-admin) - -# Overview of Installation Options - -Rancher can be installed on these main architectures: - -### High-availability Kubernetes Install with the Helm CLI - -We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. - -### High-availability Kubernetes Install with RancherD - -_Available as of v2.5.4_ - -> This is an experimental feature. - -RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. - -In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster. - -Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded. - -### Automated Quickstart to Deploy Rancher on Amazon EKS - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher v2.5+ on an EKS Kubernetes cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) - -### Single-node Kubernetes Install - -Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. - -However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. - -### Docker Install - -For test and demonstration purposes, Rancher can be installed with Docker on a single node. - -For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. 
For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) - -### Other Options - -There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: - -| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | -| ---------------------------------- | ------------------------------ | ---------- | -| With direct access to the Internet | [Docs]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) | -| Behind an HTTP proxy | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/) | These [docs,]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) plus this [configuration]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) | -| In an air gap environment | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) | - -We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. - -For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. - -> The type of cluster that Rancher needs to be installed on depends on the Rancher version. -> -> For Rancher v2.5, any Kubernetes cluster can be used. -> For Rancher v2.4.x, either an RKE Kubernetes cluster or K3s Kubernetes cluster can be used. -> For Rancher before v2.4, an RKE cluster must be used. - -For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. - -Our [instructions for installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. - -When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,]({{}}/rancher/v2.x/en/installation/requirements) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. 
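Once the chart is installed, one quick sanity check is to list the Rancher pods together with the nodes they were scheduled on. This is only a sketch — it assumes the default `cattle-system` namespace and the `app=rancher` label that the Rancher chart normally applies to its pods:

```
# List the Rancher replicas and the node each one is running on
kubectl -n cattle-system get pods -l app=rancher -o wide
```

Ideally each replica lands on a different node before you point the load balancer at the cluster.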
- -For a longer discussion of Rancher architecture, refer to the [architecture overview,]({{}}/rancher/v2.x/en/overview/architecture) [recommendations for production-grade architecture,]({{}}/rancher/v2.x/en/overview/architecture-recommendations) or our [best practices guide.]({{}}/rancher/v2.x/en/best-practices/deployment-types) - -# Prerequisites -Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -# Architecture Tip - -For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -For more architecture recommendations, refer to [this page.]({{}}/rancher/v2.x/en/overview/architecture-recommendations) - -### More Options for Installations on a Kubernetes Cluster - -Refer to the [Helm chart options]({{}}/rancher/v2.x/en/installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) -- With [TLS termination on a load balancer]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) -- With a [custom Ingress]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#customizing-your-ingress) - -In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. For the full list of their capabilities, refer to their documentation: - -- [RKE configuration options]({{}}/rke/latest/en/config-options/) -- [K3s configuration options]({{}}/k3s/latest/en/installation/install-options/) - -### More Options for Installations with Docker - -Refer to the [docs about options for Docker installs]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) for details about other configurations including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) -- With an [external load balancer]({{}}/rancher/v2.x/en/installation/options/single-node-install-external-lb/) -- With a [persistent data store]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#persistent-data) diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/_index.md deleted file mode 100644 index 840e672c0..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/_index.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: Install/Upgrade Rancher on a Kubernetes Cluster -description: Learn how to install Rancher in development and production environments. 
Read about single node and high availability installation -weight: 2 -aliases: - - /rancher/v2.x/en/installation/k8s-install/ - - /rancher/v2.x/en/installation/k8s-install/helm-rancher - - /rancher/v2.x/en/installation/k8s-install/kubernetes-rke - - /rancher/v2.x/en/installation/ha-server-install - - /rancher/v2.x/en/installation/install-rancher-on-k8s/install ---- - -In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. - -- [Prerequisites](#prerequisites) -- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) - -# Prerequisites - -- [Kubernetes Cluster](#kubernetes-cluster) -- [CLI Tools](#cli-tools) -- [Ingress Controller (Only for Hosted Kubernetes)](#ingress-controller-for-hosted-kubernetes) - -### Kubernetes Cluster - -Set up the Rancher server's local Kubernetes cluster. - -The cluster requirements depend on the Rancher version: - -- **As of Rancher v2.5,** Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. Note: To deploy Rancher v2.5 on a hosted Kubernetes cluster such as EKS, GKE, or AKS, you should deploy a compatible Ingress controller first to configure [SSL termination on Rancher.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/#3-choose-your-ssl-configuration) -- **In Rancher v2.4.x,** Rancher needs to be installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. -- **In Rancher before v2.4,** Rancher needs to be installed on an RKE Kubernetes cluster. - -For help setting up a Kubernetes cluster, we provide these tutorials: - -- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) -- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) -- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke2) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) -- **Amazon EKS:** To install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks) - -### CLI Tools - -The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. 
Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. - -### Ingress Controller (for Hosted Kubernetes) - -To deploy Rancher v2.5+ on a hosted Kubernetes cluster such as EKS, GKE, or AKS, you should deploy a compatible Ingress controller first to configure [SSL termination on Rancher.](#3-choose-your-ssl-configuration) - -For more information about deploying Rancher on EKS, refer to [this page.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks) - -# Install the Rancher Helm Chart - -Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. - -With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). - -To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.x/en/installation/options/server-tags) - -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) - -> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -To set up Rancher, - -1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) -2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) -3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) -4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) -5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) -6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) -7. [Save your options](#7-save-your-options) - -### 1. Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### 2. Create a Namespace for Rancher - -We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: - -``` -kubectl create namespace cattle-system -``` - -### 3. Choose your SSL Configuration - -The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. 
- -> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: - -- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a certificate using that CA. `cert-manager` is then responsible for managing that certificate. -- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt-issued certificate. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. -- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the names `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that CA certificate, because it may not be trusted by your nodes. Rancher will take that CA certificate and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. - - -| Configuration | Helm Chart Option | Requires cert-manager | -| ------------------------------ | ----------------------- | ------------------------------------- | -| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#4-install-cert-manager) | -| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#4-install-cert-manager) | -| Certificates from Files | `ingress.tls.source=secret` | no | - -### 4. Install cert-manager - -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt-issued certificates (`ingress.tls.source=letsEncrypt`). - -{{% accordion id="cert-manager" label="Click to Expand" %}} - -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). 
- -``` -# Install the CustomResourceDefinition resources separately -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml - -# **Important:** -# If you are running Kubernetes v1.15 or below, you -# will need to add the `--validate=false` flag to your -# kubectl apply command, or else you will receive a -# validation error relating to the -# x-kubernetes-preserve-unknown-fields field in -# cert-manager’s CustomResourceDefinition resources. -# This is a benign error and occurs due to the way kubectl -# performs resource validation. - -# Create the namespace for cert-manager -kubectl create namespace cert-manager - -# Add the Jetstack Helm repository -helm repo add jetstack https://charts.jetstack.io - -# Update your local Helm chart repository cache -helm repo update - -# Install the cert-manager Helm chart -helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v1.0.4 -``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -{{% /accordion %}} - -### 5. Install Rancher with Helm and Your Chosen Certificate Option - -The exact command to install Rancher differs depending on the certificate configuration. - -{{% tabs %}} -{{% tab "Rancher-generated Certificates" %}} - - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. - -Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Let's Encrypt" %}} - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. - -In the following command, - -- `hostname` is set to the public DNS record, -- `ingress.tls.source` is set to `letsEncrypt` -- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. 
- -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Certificates from Files" %}} -In this option, Kubernetes secrets are created from your own certificates for Rancher to use. - -When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. - -Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. - -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set the `hostname`. -- Set `ingress.tls.source` to `secret`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. -{{% /tab %}} -{{% /tabs %}} - -The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. - -- [HTTP Proxy]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) -- [Private Docker Image Registry]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.x/en/installation/resources/chart-options/) for the full list of options. - - -### 6. Verify that the Rancher Server is Successfully Deployed - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### 7. Save Your Options - -Make sure you save the `--set` options you used. 
You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it. You should have a functional Rancher server. - -In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. - -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) Page - - -### Optional Next Steps - -Enable the Enterprise Cluster Manager. diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/_index.md deleted file mode 100644 index ca508b57a..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/_index.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Installing Rancher on Amazon EKS -shortTitle: Amazon EKS -weight: 4 ---- - -This page covers two ways to install Rancher v2.5+ on EKS. Older Rancher versions should not be installed on hosted Kubernetes clusters. - -The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. - -The second is a guide for installing an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) -- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) - -# Automated Quickstart using AWS Best Practices - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) - -The quick start guide provides three options for deploying Rancher on EKS: - -- **Deploy Rancher into a new VPC and new Amazon EKS cluster.** This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. -- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. -- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. 
- -Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: - -- A highly available architecture that spans three Availability Zones.* -- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* -- In the public subnets: - - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* - - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* -- In the private subnets: - - Kubernetes nodes in an Auto Scaling group.* - - A Network Load Balancer (not shown) for accessing the Rancher console. -- Rancher deployment using AWS Systems Manager automation. -- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* -- An Amazon Route 53 DNS record for accessing the Rancher deployment. - -\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. - -# Creating an EKS Cluster for the Rancher Server - -In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -> **Requirements:** -> -> - You should already have an AWS account. -> - It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. -> - The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) -> - Only Rancher v2.5+ can be installed on hosted Kubernetes clusters. - -### 1. Prepare your Workstation - -Install the following command line tools on your workstation: - -- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) -- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) - -### 2. Configure the AWS CLI - -To configure the AWS CLI, run the following command: - -``` -aws configure -``` - -Then enter the following values: - -| Value | Description | -|-------|-------------| -| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | -| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | -| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | -| Default output format | Enter `json`. | - -### 3. Create the EKS Cluster - -To create an EKS cluster, run the following command. 
Use the AWS region that applies to your use case: - -``` -eksctl create cluster \ - --name rancher-server \ - --version 1.18 \ - --region us-west-2 \ - --nodegroup-name ranchernodes \ - --nodes 3 \ - --nodes-min 1 \ - --nodes-max 4 \ - --managed -``` - -The cluster will take some time to be deployed with CloudFormation. - -### 4. Test the Cluster - -To test the cluster, run: - -``` -eksctl get cluster -``` - -The result should look like the following: - -``` -eksctl get cluster -2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 -2021-03-18 15:09:35 [ℹ] using region us-west-2 -NAME REGION EKSCTL CREATED -iztest2 us-west-2 True -``` - -### 5. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. - -The following command installs an `nginx-ingress-controller` with a LoadBalancer service. This will result in an ELB (Elastic Load Balancer) in front of NGINX: - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -### 6. Get Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) - AGE -ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP - 27m -``` - -Save the `EXTERNAL-IP`. - -### 7. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) - -### 8. Install the Rancher Helm Chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md deleted file mode 100644 index 1b523500d..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ /dev/null @@ -1,269 +0,0 @@ ---- -title: Rancher Helm Chart Options -weight: 1 -aliases: - - /rancher/v2.x/en/installation/options/ - - /rancher/v2.x/en/installation/options/chart-options/ - - /rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/ - - /rancher/v2.x/en/installation/resources/chart-options ---- - -This page is a configuration reference for the Rancher Helm chart. 
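All of the options below are passed to Helm when Rancher is installed or upgraded. As a minimal sketch — the `rancher-latest` repository name and the hostname are placeholders — individual options can be supplied as `--set` flags:

```plain
helm upgrade --install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set replicas=3 \
  --set auditLog.level=1
```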
- -For help choosing a Helm chart version, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/choosing-version/) - -For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/feature-flags/) - -- [Common Options](#common-options) -- [Advanced Options](#advanced-options) -- [API Audit Log](#api-audit-log) -- [Setting Extra Environment Variables](#setting-extra-environment-variables) -- [TLS Settings](#tls-settings) -- [Customizing your Ingress](#customizing-your-ingress) -- [HTTP Proxy](#http-proxy) -- [Additional Trusted CAs](#additional-trusted-cas) -- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) -- [External TLS Termination](#external-tls-termination) - -### Common Options - -| Option | Default Value | Description | -| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
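As a sketch of how these common options fit together, they can also be collected in a values file rather than passed as individual `--set` flags. The file name, repository name, and values below are placeholders, not required names:

```yaml
# rancher-values.yaml - example values for the common options above
hostname: rancher.my.org
ingress:
  tls:
    source: letsEncrypt     # one of: rancher, letsEncrypt, secret
letsEncrypt:
  email: me@example.org
  environment: production
privateCA: false
```

```plain
helm upgrade --install rancher rancher-latest/rancher \
  --namespace cattle-system \
  -f rancher-values.yaml
```

Passing a values file with `-f` is broadly equivalent to setting each option with `--set`, and it makes the chosen options easier to save and reuse on later upgrades.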
- -### Advanced Options - -| Option | Default Value | Description | -| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. Consider using the `restrictedAdmin` option to prevent users from modifying the local cluster._ | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | -| `certmanager.version` | "" | `string` - set cert-manager compatibility | -| `debug` | false | `bool` - set debug flag on rancher server | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `ingress.enabled` | true | When set to false, Helm will not install a Rancher ingress. Set the option to false to deploy your own ingress. _Available as of v2.5.6_ | -| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. 
| |
-| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma-separated list of hostnames or IP addresses that should not use the proxy |
-| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher |
-| `rancherImage` | "rancher/rancher" | `string` - rancher image source |
-| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" |
-| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag |
-| `replicas` | 3 | `int` - Number of replicas of Rancher pods |
-| `resources` | {} | `map` - rancher pod resource requests & limits |
-| `restrictedAdmin` | `false` | _Available in Rancher v2.5_ `bool` - When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#restricted-admin) |
-| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ |
-| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" |
-| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. |
-
-
-### API Audit Log
-
-To enable the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing/), set the audit level:
-
-```plain
---set auditLog.level=1
-```
-
-By default, enabling audit logging creates a sidecar container (`rancher-audit-log`) in the Rancher pod that streams the log to `stdout`. You can collect this log as you would any container log, for example by enabling the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) for the `System` project on the Rancher server cluster. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply, so use your OS or Docker daemon's log rotation features to control disk space use.
-
-Set `auditLog.destination` to `hostPath` to write logs to a volume shared with the host system instead of streaming them to a sidecar container. When setting the destination to `hostPath`, you may also want to adjust the other `auditLog` parameters to configure log rotation.
-
-> In an air-gapped environment, supply the `--set busyboxImage` value during Helm installs or upgrades to reference the private registry location of the busybox container image; this image is used for the sidecar container.
-
-### Setting Extra Environment Variables
-
-You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values.
-
-```plain
---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION'
---set 'extraEnv[0].value=1.0'
-```
-
-### TLS Settings
-
-To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. 
For example, to configure TLS 1.0 as minimum accepted TLS version: - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. - -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. - -> **Important:** If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair -``` - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. - -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. - -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) - -# External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. 
Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. - -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.x/en/installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. - -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: 'true' -``` - -### Required Headers - -- `Host` -- `X-Forwarded-Proto` -- `X-Forwarded-Port` -- `X-Forwarded-For` - -### Recommended Timeouts - -- Read Timeout: `1800 seconds` -- Write Timeout: `1800 seconds` -- Connect Timeout: `30 seconds` - -### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - -### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
- proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/_index.md deleted file mode 100644 index 90ec73ee3..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Rollbacks -weight: 3 -aliases: - - /rancher/v2.x/en/upgrades/rollbacks - - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks - - /rancher/v2.x/en/upgrades/ha-server-rollbacks - - /rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks - - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks ---- - -- [Rolling Back to Rancher v2.5.0+](#rolling-back-to-rancher-v2-5-0) -- [Rolling Back to Rancher v2.2-v2.4+](#rolling-back-to-rancher-v2-2-v2-4) -- [Rolling Back to Rancher v2.0-v2.1](#rolling-back-to-rancher-v2-0-v2-1) - -# Rolling Back to Rancher v2.5.0+ - -To roll back to Rancher v2.5.0+, use the `rancher-backup` application and restore Rancher from backup. - -Rancher has to be started with the lower/previous version after a rollback. - -A restore is performed by creating a Restore custom resource. - -> **Important** -> -> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.](../migrating-rancher) -> * While restoring rancher on the same setup, the operator will scale down the rancher deployment when restore starts, and it will scale back up the deployment once restore completes. So Rancher will be unavailable during the restore. - -### Create the Restore Custom Resource - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** -1. Click **Restore.** -1. Create the Restore with the form, or with YAML. For creating the Restore resource using form, refer to the [configuration reference](../../../backups/v2.5/configuration/restore-config/) and to the [examples.](../../../backups/v2.5/examples) -1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Restore YAML. - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Restore - metadata: - name: restore-migration - spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - ``` - - For help configuring the Restore, refer to the [configuration reference](../../../backups/v2.5/configuration/restore-config/) and to the [examples.](../../../backups/v2.5/examples) - -1. Click **Create.** - -**Result:** The rancher-operator scales down the rancher deployment during restore, and scales it back up once the restore completes. The resources are restored in this order: - -1. Custom Resource Definitions (CRDs) -2. Cluster-scoped resources -3. Namespaced resources - -To check how the restore is progressing, you can check the logs of the operator. 
Follow these steps to get the logs: - -```yaml -kubectl get pods -n cattle-resources-system -kubectl logs -n cattle-resources-system -f -``` - -### Roll back to the previous Rancher version - -Rancher can be rolled back using the Rancher UI. - -1. In the Rancher UI, go to the local cluster. -1. Go to the System project. -1. Edit Rancher deployment and modify image to version that you are rolling back to. -1. Save changes made. - -# Rolling Back to Rancher v2.2-v2.4+ - -To roll back to Rancher before v2.5, follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{}}/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/) Restoring a snapshot of the Rancher server cluster will revert Rancher to the version and state at the time of the snapshot. - -For information on how to roll back Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks) - -> Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. - -# Rolling Back to Rancher v2.0-v2.1 - -Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved [here]({{}}/rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1) and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/_index.md deleted file mode 100644 index 0929a70d6..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/_index.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: Upgrades -weight: 2 -aliases: - - /rancher/v2.x/en/upgrades/upgrades - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades - - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap - - /rancher/v2.x/en/upgrades/air-gap-upgrade/ - - /rancher/v2.x/en/upgrades/upgrades/ha - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/upgrades/ha - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ - - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/ - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha - - /rancher/v2.x/en/installation/upgrades-rollbacks/ - - /rancher/v2.x/en/upgrades/ ---- -The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. - -For the instructions to upgrade Rancher installed on Kubernetes with RancherD, refer to [this page.]({{}}/rancher/v2.x/en/installation/install-rancher-on-linux/upgrades) - -For the instructions to upgrade Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) - -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. 
- -If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). - -- [Prerequisites](#prerequisites) -- [Upgrade Outline](#upgrade-outline) -- [Known Upgrade Issues](#known-upgrade-issues) -- [RKE Add-on Installs](#rke-add-on-installs) - -# Prerequisites - -### Access to kubeconfig - -Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. - -If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. - -The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) - -### Review Known Issues - -Review the [known upgrade issues](#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. - -A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren't supported. - -### Helm Version - -The upgrade instructions assume you are using Helm 3. - -For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here]({{}}/rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -### For air gap installs: Populate private registry - --For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version. Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -### For upgrades from a Rancher server with a hidden local cluster - -If you are upgrading to Rancher v2.5 from a Rancher server that was started with the Helm chart option `--add-local=false`, you will need to drop that flag when upgrading. Otherwise, the Rancher server will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. 
For more information, see [this section.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#upgrading-from-rancher-with-a-hidden-local-cluster) - -### For upgrades from v2.0-v2.2 with external TLS termination - -If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) - -### For upgrades with cert-manager older than 0.8.0 - -[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) -- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) -- [3. Upgrade Rancher](#3-upgrade-rancher) -- [4. Verify the Upgrade](#4-verify-the-upgrade) - -# 1. Back up Your Kubernetes Cluster that is Running Rancher Server - -For Rancher v2.5+, use the [backup application]({{}}/rancher/v2.x/en/backups/v2.5/back-up-rancher) to back up Rancher. - -For Rancher v2.0-v2.4, [take a one-time snapshot]({{}}/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-b-one-time-snapshots) -of your Kubernetes cluster running Rancher server. - -You'll use the backup as a restoration point if something goes wrong during upgrade. - -# 2. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.x/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: - - ```plain - helm fetch rancher-/rancher --version=v2.4.11 - ``` - -# 3. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -{{% tabs %}} -{{% tab "Kubernetes Upgrade" %}} - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. 
- -``` -helm get values rancher -n cattle-system - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. - -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow [Option B: Reinstalling Rancher and cert-manager.](#option-b-reinstalling-rancher-and-cert-manager) - -Otherwise, follow [Option A: Upgrading Rancher.](#option-a-upgrading-rancher) - -### Option A: Upgrading Rancher - -Upgrade Rancher to the latest version with all your settings. - -Take all the values from the previous step and append them to the command using `--set key=value`: - -``` -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -> **Note:** The above is an example, there may be more values from the previous step that need to be appended. - -Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: - -``` -helm get values rancher -n cattle-system -o yaml > values.yaml - -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - -f values.yaml \ - --version=2.4.5 -``` - -### Option B: Reinstalling Rancher and cert-manager - -If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher -n cattle-system - ``` - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -{{% /tab %}} - -{{% tab "Kubernetes Air Gap Upgrade" %}} - -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -Based on the choice you made during installation, complete one of the procedures below. - -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. - - -### Option A: Default Self-signed Certificate - - ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -### Option B: Certificates from Files using Kubernetes Secrets - -```plain -helm template ./rancher-.tgz --output-dir . 
\ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - -{{% /tab %}} -{{% /tabs %}} - -# 4. Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - -# Known Upgrade Issues - -The following table lists some of the most noteworthy issues to be considered when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -Upgrade Scenario | Issue ----|--- -Upgrading to v2.4.6 or v2.4.7 | These Rancher versions had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. -Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. -Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{}}/rancher/v2.x/en/installation/resources/local-system-charts/). -Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. -Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. 
Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration) - -# RKE Add-on Installs - -**Important: RKE add-on install is only supported up to Rancher v2.0.8** - -Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/). - -If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md deleted file mode 100644 index 4037760c9..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Upgrading Rancher Installed on Kubernetes with Helm 2 -weight: 1050 -aliases: - - /rancher/v2.x/en/upgrades/upgrades/ha/helm2 - - /rancher/v2.x/en/upgrades/helm2 - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha/helm2 - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha/helm2 - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/helm2 ---- - -> Helm 3 has been released. If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. -> -> The [current instructions for Upgrading Rancher Installed on Kubernetes](https://rancher.com/docs/rancher/v2.x/en/upgrades/upgrades/ha/) use Helm 3. -> -> This section provides a copy of the older instructions for upgrading Rancher with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. - -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - -If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). - ->**Notes:** -> -> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) -> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) -> - The upgrade instructions assume you are using Helm 3. 
For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/upgrades/upgrades) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) -- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [A. Back up your Kubernetes cluster that is running Rancher server](#a-back-up-your-kubernetes-cluster-that-is-running-rancher-server) -- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) -- [C. Upgrade Rancher](#c-upgrade-rancher) -- [D. Verify the Upgrade](#d-verify-the-upgrade) - -### A. Back up Your Kubernetes Cluster that is Running Rancher Server - -[Take a one-time snapshot]({{}}/rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-b-one-time-snapshots) -of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restore point if something goes wrong during upgrade. - -### B. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.x/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - -### C. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -{{% tabs %}} -{{% tab "Kubernetes Upgrade" %}} - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. - -``` -helm get values rancher - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. 
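-
-If you prefer not to repeat every `--set` flag, you can optionally export the current values to a file and reference that file during the upgrade. This is only a sketch: it assumes the release is named `rancher` and uses `rancher-latest` as the chart repository name, so substitute your own repository.
-
-```plain
-# Export the values that were previously passed with --set
-helm get values rancher > values.yaml
-
-# Upgrade using the exported values (example repository name)
-helm upgrade --install rancher rancher-latest/rancher \
-  --namespace cattle-system \
-  -f values.yaml
-```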
- -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher`. Otherwise, follow `Option A: Upgrading Rancher`. - -{{% accordion label="Option A: Upgrading Rancher" %}} - -Upgrade Rancher to the latest version with all your settings. - -Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. - -``` -helm upgrade --install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` -{{% /accordion %}} - -{{% accordion label="Option B: Reinstalling Rancher chart" %}} - -If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher - ``` - - In case this results in an error that the release "rancher" was not found, make sure you are using the correct deployment name. Use `helm list` to list the helm-deployed releases. - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -{{% /accordion %}} - -{{% /tab %}} - -{{% tab "Kubernetes Air Gap Upgrade" %}} - -1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - Based on the choice you made during installation, complete one of the procedures below. - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - - ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -```plain -helm template ./rancher-.tgz --output-dir . 
\ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} - -2. Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - - Use `kubectl` to apply the rendered manifests. - - ```plain - kubectl -n cattle-system apply -R -f ./rancher - ``` - -{{% /tab %}} -{{% /tabs %}} - -### D. Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -Should something go wrong, follow the [roll back]({{}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md deleted file mode 100644 index cb8f88198..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Migrating from a Kubernetes Install with an RKE Add-on -weight: 1030 -aliases: - - /rancher/v2.x/en/upgrades/ha-server-upgrade/ - - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/ - - /rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/migrating-from-rke-add-on - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/migrating-from-rke-add-on ---- - -> **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. - - -The following instructions will help guide you through migrating from the RKE Add-on install to managing Rancher with the Helm package manager. - -You will need the to have [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) installed and the kubeconfig YAML file (`kube_config_rancher-cluster.yml`) generated by RKE. - -> **Note:** This guide assumes a standard Rancher install. If you have modified any of the object names or namespaces, please adjust accordingly. 
- -> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. - -### Point kubectl at your Rancher Cluster - -Make sure `kubectl` is using the correct kubeconfig YAML file. Set the `KUBECONFIG` environmental variable to point to `kube_config_rancher-cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -After setting the `KUBECONFIG` environment variable, verify that it contains the correct `server` parameter. It should point directly to one of your cluster nodes on port `6443`. - -``` -kubectl config view -o=jsonpath='{.clusters[*].cluster.server}' -https://NODE:6443 -``` - -If the output from the command shows your Rancher hostname with the suffix `/k8s/clusters`, the wrong kubeconfig YAML file is configured. It should be the file that was created when you used RKE to create the cluster to run Rancher. - -### Save your certificates - -If you have terminated ssl on the Rancher cluster ingress, recover your certificate and key for use in the Helm install. - -Use `kubectl` to get the secret, decode the value and direct the output to a file. - -``` -kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.crt }' | base64 -d > tls.crt -kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.key }' | base64 -d > tls.key -``` - -If you specified a private CA root cert - -``` -kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ .data.cacerts\.pem }' | base64 -d > cacerts.pem -``` - -### Remove previous Kubernetes objects - -Remove the Kubernetes objects created by the RKE install. - -> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{}}/rancher/v2.x/en/backups/backups/ha-backups) for details. - -``` -kubectl -n cattle-system delete ingress cattle-ingress-http -kubectl -n cattle-system delete service cattle-service -kubectl -n cattle-system delete deployment cattle -kubectl -n cattle-system delete clusterrolebinding cattle-crb -kubectl -n cattle-system delete serviceaccount cattle-admin -``` - -### Remove addons section from `rancher-cluster.yml` - -The addons section from `rancher-cluster.yml` contains all the resources needed to deploy Rancher using RKE. By switching to Helm, this part of the cluster configuration file is no longer needed. Open `rancher-cluster.yml` in your favorite text editor and remove the addons section: - ->**Important:** Make sure you only remove the addons section from the cluster configuration file. - -``` -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Remove addons section from here til end of file -addons: |- - --- - ... -# End of file -``` - -### Follow Helm and Rancher install steps - -From here follow the standard install steps. 
- -* [3 - Initialize Helm]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) -* [4 - Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md deleted file mode 100644 index 42dc9a649..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Upgrading to v2.0.7+ — Namespace Migration -weight: 1040 -aliases: - - /rancher/v2.x/en/upgrades/upgrades/namespace-migration - - /rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/namespace-migration - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/namespace-migration ---- ->This section applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. - -In Rancher v2.0.6 and prior, system namespaces crucial for Rancher and Kubernetes operations were not assigned to any Rancher project by default. Instead, these namespaces existed independently from all Rancher projects, but you could move these namespaces into any project without affecting cluster operations. - -These namespaces include: - -- `kube-system` -- `kube-public` -- `cattle-system` -- `cattle-alerting`1 -- `cattle-logging`1 -- `cattle-pipeline`1 -- `ingress-nginx` - ->1 Only displays if this feature is enabled for the cluster. - -However, with the release of Rancher v2.0.7, the `System` project was introduced. This project, which is automatically created during the upgrade, is assigned the system namespaces above to hold these crucial components for safe keeping. - -During upgrades from Rancher v2.0.6- to Rancher v2.0.7+, all system namespaces are moved from their default location outside of all projects into the newly created `System` project. However, if you assigned any of your system namespaces to a project before upgrading, your cluster networking may encounter issues afterwards. This issue occurs because the system namespaces are not where the upgrade expects them to be during the upgrade, so it cannot move them to the `System` project. - -- To prevent this issue from occurring before the upgrade, see [Preventing Cluster Networking Issues](#preventing-cluster-networking-issues). -- To fix this issue following upgrade, see [Restoring Cluster Networking](#restoring-cluster-networking). - -> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. - -## Preventing Cluster Networking Issues - -You can prevent cluster networking issues from occurring during your upgrade to v2.0.7+ by unassigning system namespaces from all of your Rancher projects. Complete this task if you've assigned any of a cluster's system namespaces into a Rancher project. - -1. Log into the Rancher UI before upgrade. - -1. From the context menu, open the **local** cluster (or any of your other clusters). - -1. From the main menu, select **Project/Namespaces**. - -1. Find and select the following namespaces. Click **Move** and then choose **None** to move them out of your projects. 
Click **Move** again. - - >**Note:** Some or all of these namespaces may already be unassigned from all projects. - - - `kube-system` - - `kube-public` - - `cattle-system` - - `cattle-alerting`1 - - `cattle-logging`1 - - `cattle-pipeline`1 - - `ingress-nginx` - - >1 Only displays if this feature is enabled for the cluster. - -
*Moving namespaces out of projects*
- ![Moving Namespaces]({{}}/img/rancher/move-namespaces.png) - -1. Repeat these steps for each cluster where you've assigned system namespaces to projects. - -**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades). - -## Restoring Cluster Networking - -Reset the cluster nodes' network policies to restore connectivity. - ->**Prerequisites:** -> ->Download and setup [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -{{% tabs %}} -{{% tab "Kubernetes Install" %}} -1. From **Terminal**, change directories to your kubectl file that's generated during Rancher install, `kube_config_rancher-cluster.yml`. This file is usually in the directory where you ran RKE during Rancher installation. - -1. Before repairing networking, run the following two commands to make sure that your nodes have a status of `Ready` and that your cluster components are `Healthy`. - - ``` - kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes - - NAME STATUS ROLES AGE VERSION - 165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 - 165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 - 165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cs - - NAME STATUS MESSAGE ERROR - scheduler Healthy ok - controller-manager Healthy ok - etcd-0 Healthy {"health": "true"} - etcd-2 Healthy {"health": "true"} - etcd-1 Healthy {"health": "true"} - ``` - -1. Check the `networkPolicy` for all clusters by running the following command. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" - - ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION - c-59ptz custom - local local - - -1. Disable the `networkPolicy` for all clusters, still pointing toward your `kube_config_rancher-cluster.yml`. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster {} --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster local --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' - ``` - -1. 
Remove annotations for network policy for all clusters - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster {} "networking.management.cattle.io/enable-network-policy"="false" --overwrite - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster local "networking.management.cattle.io/enable-network-policy"="false" --overwrite - ``` - -1. Check the `networkPolicy` for all clusters again to make sure the policies have a status of `false `. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" - - ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION - c-59ptz custom false false false - local local false false false - -1. Remove all network policies from all namespaces. Run this command for each cluster, using the kubeconfig generated by RKE. - - ``` - for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all; - done - ``` - -1. Remove all the projectnetworkpolicies created for the clusters, to make sure networkpolicies are not recreated. - - ``` - for cluster in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get clusters -o custom-columns=NAME:.metadata.name --no-headers); do - for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n $cluster -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml delete projectnetworkpolicy -n $project --all - done - done - ``` - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n local -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml -n $project delete projectnetworkpolicy --all; - done - ``` - -1. Wait a few minutes and then log into the Rancher UI. - - - If you can access Rancher, you're done, so you can skip the rest of the steps. - - If you still can't access Rancher, complete the steps below. - -1. Force your pods to recreate themselves by entering the following command. - - ``` - kubectl --kubeconfig kube_config_rancher-cluster.yml delete pods -n cattle-system --all - ``` - -1. Log into the Rancher UI and view your clusters. Created clusters will show errors from attempting to contact Rancher while it was unavailable. However, these errors should resolve automatically. - -{{% /tab %}} -{{% tab "Rancher Launched Kubernetes" %}} -
-
-If you can access Rancher, but one or more of the clusters that you launched using Rancher has no networking, you can repair them by deleting all network policies in every namespace of the affected cluster. You can run the command below in either of the following ways:
-
-- Using the cluster's [embedded kubectl shell]({{}}/rancher/v2.x/en/k8s-in-rancher/kubectl/).
-- By [downloading the cluster kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl) and running the command from your workstation.
-
-   ```
-   for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do
-     kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all;
-   done
-   ```
-
-{{% /tab %}}
-{{% /tabs %}}
-
-
diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-linux/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-linux/_index.md
deleted file mode 100644
index 5ab98e108..000000000
--- a/content/rancher/v2.x/en/installation/install-rancher-on-linux/_index.md
+++ /dev/null
@@ -1,240 +0,0 @@
----
-title: Install/Upgrade Rancher on a Linux OS
-weight: 3
----
-
-_Available as of Rancher v2.5.4_
-
-> This is an experimental feature.
-
-We are excited to introduce a new, simpler way to install Rancher called RancherD.
-
-RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster.
-
-- [About RancherD Installs](#about-rancherd-installs)
-- [Prerequisites](#prerequisites)
-- [Part I: Installing Rancher](#part-i-installing-rancher)
-- [Part II: High Availability](#part-ii-high-availability)
-- [Upgrades](#upgrades)
-- [Configuration](#configuration)
-- [Uninstall](#uninstall)
-- [RKE2 Documentation](#rke2-documentation)
-
-# About RancherD Installs
-
-When RancherD is launched on a host, it first installs an RKE2 Kubernetes cluster, then deploys Rancher on the cluster as a Kubernetes daemonset.
-
-In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster.
-
-Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded.
-
-In Part I of these instructions, you'll learn how to launch RancherD on a single node. The result of following the steps in Part I is a single-node [RKE2](https://docs.rke2.io/) Kubernetes cluster with the Rancher server installed. This cluster can easily be converted to a high-availability cluster later. If Rancher only needs to manage the local Kubernetes cluster, the installation is complete.
-
-Part II explains how to convert the single-node Rancher installation into a high-availability installation. If the Rancher server will manage downstream Kubernetes clusters, it is important to follow these steps. A discussion of recommended architecture for highly available Rancher deployments can be found in our [Best Practices Guide.]({{}}/rancher/v2.x/en/best-practices/v2.5/rancher-server)
-
-# Prerequisites
-
-### Node Requirements
-
-RancherD must be launched on a Linux OS. At this time, only OSes that leverage systemd are supported.
-
-The Linux node needs to fulfill the [installation requirements]({{}}/rancher/v2.x/en/installation/requirements) for hardware and networking. Docker is not required for RancherD installs.
-
-To install RancherD on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, some [additional steps]({{}}/rancher/v2.x/en/installation/requirements/#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required.
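As a quick pre-flight check, you can confirm on each node that systemd is present and see whether SELinux is enforcing. This is only a sketch; the authoritative hardware and networking requirements are on the requirements page linked above.

```
# Confirm the node is managed by systemd (required for RancherD)
systemctl --version | head -n 1

# Show the SELinux mode, if the SELinux tools are installed.
# "Enforcing" on CentOS 8 / RHEL 8 means the additional steps linked above apply.
command -v getenforce >/dev/null 2>&1 && getenforce || echo "SELinux tools not installed"
```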
-### Root Access - -Before running the installation commands, you will need to log in as root: - -``` -sudo -s -``` - -### Fixed Registration Address - -A fixed registration address is recommended for single-node installs and required for high-availability installs with RancherD. - -The fixed registration address is an endpoint that is used for two purposes: - -- To access the Kubernetes API. So you can, for example, modify your [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file to point to it instead of a specific node. -- To add new nodes to the Kubernetes cluster. To add nodes to the cluster later, you will run a command on the node that will specify the fixed registration address of the cluster. - -If you are installing Rancher on a single node, the fixed registration address makes it possible to add more nodes to the cluster so that you can convert the single-node install to a high-availability install without causing downtime to the cluster. If you don't set up this address when installing the single-node Kubernetes cluster, you would need to re-run the installation script with a fixed registration address in order to add new nodes to the cluster. - -The fixed registration can be the IP or hostname of any of the server nodes, but in many cases those may change over time as nodes are created and destroyed. Therefore, you should have a stable endpoint in front of the server nodes. - -This endpoint can be set up using any number of approaches, such as: - -* A layer 4 (TCP) load balancer -* Round-robin DNS -* Virtual or elastic IP addresses - -The following should be taken into consideration when configuring the load balancer or other endpoint: - -- The RancherD server process listens on port 9345 for new nodes to register. -- The Kubernetes API is served on port 6443, as normal. -- In RancherD installs, the Rancher UI is served on port 8443 by default. (This is different from Helm chart installs, where port 443 is used by default.) - -# Part I: Installing Rancher - -### 1. Set up Configurations - -To avoid certificate errors with the fixed registration address, you should launch the server with the `tls-san` parameter set. This parameter should refer to your fixed registration address. - -This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access the Kubernetes cluster via both the IP and the hostname. - -Create the RancherD config file at `/etc/rancher/rke2/config.yaml`: - -```yaml -token: my-shared-secret -tls-san: - - my-fixed-registration-address.com - - another-kubernetes-domain.com -``` - -The first server node establishes the secret token that other nodes would register with if they are added to the cluster. - -If you do not specify a pre-shared secret, RancherD will generate one and place it at `/var/lib/rancher/rke2/server/node-token`. - -To specify your own pre-shared secret as the token, set the `token` argument on startup. - -Installing Rancher this way will use Rancher-generated certificates. 
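As a quick sanity check once the server is running (step 2 below), you can confirm that the `tls-san` entries were picked up and read back the generated token. This is only a sketch, assuming the example hostname above and a standard `openssl` client on the node:

```
# Verify the fixed registration address appears as a Subject Alternative Name
# in the serving certificate presented on the Kubernetes API port
openssl s_client -connect my-fixed-registration-address.com:6443 \
  -servername my-fixed-registration-address.com </dev/null 2>/dev/null \
  | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"

# If you did not set a token, read back the one RancherD generated
cat /var/lib/rancher/rke2/server/node-token
```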
To use your own self-signed or trusted certificates, refer to the [configuration guide.]({{}}/rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/#certificates-for-the-rancher-server) - -For information on customizing the RancherD Helm chart values.yaml, refer to [this section.]({{}}/rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/#customizing-the-rancherd-helm-chart) - -### 2. Launch the first server node - -Run the RancherD installer: - -``` -curl -sfL https://get.rancher.io | sh - -``` - -The RancherD version can be specified using the `INSTALL_RANCHERD_VERSION` environment variable: - -``` -curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.4-rc6 sh - -``` - -Once installed, the `rancherd` binary will be on your PATH. You can check out its help text like this: - -``` -rancherd --help -NAME: - rancherd - Rancher Kubernetes Engine 2 -... -``` - -Next, launch RancherD: - -``` -systemctl enable rancherd-server.service -systemctl start rancherd-server.service -``` - -When RancherD launches, it installs an RKE2 Kubernetes cluster. Use the following command to see the logs of the Kubernetes cluster as it comes up: - -``` -journalctl -eu rancherd-server -f -``` - -### 3. Set up the kubeconfig file with kubectl - -Once the Kubernetes cluster is up, set up RancherD’s kubeconfig file and `kubectl`: - -``` -export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=$PATH:/var/lib/rancher/rke2/bin -``` - -### 4. Verify that Rancher is installed on the Kubernetes cluster - -Now, you can start issuing `kubectl` commands. Use the following commands to verify that Rancher is deployed as a daemonset on the cluster: - -``` -kubectl get daemonset rancher -n cattle-system -kubectl get pod -n cattle-system -``` - -If you watch the pods, you will see the following pods installed: - -- `helm-operation` pods in the `cattle-system` namespace -- a `rancher` pod and `rancher-webhook` pod in the `cattle-system` namespace -- a `fleet-agent`, `fleet-controller`, and `gitjob` pod in the `fleet-system` namespace -- a `rancher-operator` pod in the `rancher-operator-system` namespace - -### 5. Set the initial Rancher password - -Once the `rancher` pod is up and running, run the following: - -``` -rancherd reset-admin -``` - -This will give you the URL, username and password needed to log into Rancher. Follow that URL, plug in the credentials, and you’re up and running with Rancher! - -If Rancher will only manage the local Kubernetes cluster, the installation is complete. - -# Part II: High Availability - -If you plan to use the Rancher server to manage downstream Kubernetes clusters, Rancher needs to be highly available. In these steps, you will add more nodes to achieve a high-availability cluster. Since Rancher is running as a daemonset, it will automatically launch on the nodes you add. - -An odd number of nodes is required because the etcd cluster, which contains the cluster data, needs a majority of live nodes to avoid losing quorum. A loss of quorum could require the cluster to be restored from backup. Therefore, we recommend using three nodes. - -When following these steps, you should still be logged in as root. - -### 1. Configure the fixed registration address on a new node - -Additional server nodes are launched much like the first, except that you must specify the `server` and `token` parameters so that they can successfully connect to the initial server node. 
- -Here is an example of what the RancherD config file would look like for additional server nodes. By default, this config file is expected to be located at `/etc/rancher/rke2/config.yaml`. - -```yaml -server: https://my-fixed-registration-address.com:9345 -token: my-shared-secret -tls-san: - - my-fixed-registration-address.com - - another-kubernetes-domain.com -``` - -### 2. Launch an additional server node - -Run the installer on the new node: - -``` -curl -sfL https://get.rancher.io | sh - -``` - -This will download RancherD and install it as a systemd unit on your host. - - -Next, launch RancherD: - -``` -systemctl enable rancherd-server.service -systemctl start rancherd-server.service -``` - -### 3. Repeat - -Repeat steps one and two for another Linux node, bringing the number of nodes in the cluster to three. - -**Result:** Rancher is highly available and the installation is complete. - -# Upgrades - -For information on upgrades and rollbacks, refer to [this page.](./upgrades) - -# Configuration - -For information on how to configure certificates, node taints, Rancher Helm chart options, or RancherD CLI options, refer to the [configuration reference.](./rancherd-configuration) - -# Uninstall - -To uninstall RancherD from your system, run the command below. This will shut down the process, remove the RancherD binary, and clean up files used by RancherD. - -``` -rancherd-uninstall.sh -``` - -# RKE2 Documentation - -For more information on RKE2, the Kubernetes distribution used to provision the underlying cluster, refer to the documentation [here.](https://docs.rke2.io/) diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md deleted file mode 100644 index ca79f69ce..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: RancherD Configuration Reference -weight: 1 ---- - -> RancherD is an experimental feature. - -In RancherD, a server node is defined as a machine (bare-metal or virtual) running the `rancherd server` command. The server runs the Kubernetes API as well as Kubernetes workloads. - -An agent node is defined as a machine running the `rancherd agent` command. They don't run the Kubernetes API. To add nodes designated to run your apps and services, join agent nodes to your cluster. - -In the RancherD installation instructions, we recommend running three server nodes in the Rancher server cluster. Agent nodes are not required. - -- [Certificates for the Rancher Server](#certificates-for-the-rancher-server) -- [Node Taints](#node-taints) -- [Customizing the RancherD Helm Chart](#customizing-the-rancherd-helm-chart) -- [RancherD Server CLI Options](#rancherd-server-cli-options) -- [RancherD Agent CLI Options](#rancherd-agent-cli-options) - -# Certificates for the Rancher Server - -Rancherd does not use cert-manger to provision certs. Instead RancherD allows you to bring your own self-signed or trusted certs by storing the .pem files in `/etc/rancher/ssl/`. When doing this you should also set the `publicCA` parameter to `true` in your HelmChartConfig. 
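As an illustration only, bringing your own certificate could look roughly like the following. The source file names are hypothetical, the destination paths are the ones listed below, and the manifest file name under `/var/lib/rancher/rke2/server/manifests` is arbitrary (see the customization section for details on the `HelmChartConfig`):

```
# Copy your own certificate, key, and (for a private CA) the CA certificate
# to the locations RancherD expects (see the list of paths below)
mkdir -p /etc/rancher/ssl
cp my-cert.pem /etc/rancher/ssl/cert.pem      # hypothetical source file names
cp my-key.pem  /etc/rancher/ssl/key.pem
cp my-ca.pem   /etc/rancher/ssl/cacerts.pem   # only for self-signed / private CA certs

# Set publicCA: true, as described above, so the chart uses your certificates
cat > /var/lib/rancher/rke2/server/manifests/rancher-chart-config.yaml <<'EOF'
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rancher
  namespace: kube-system
spec:
  valuesContent: |
    publicCA: true
EOF
```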
For more information on the HelmChartConfig, refer to the section about [customizing the RancherD Helm chart.](#customizing-the-rancherd-helm-chart) - -Private key: `/etc/rancher/ssl/key.pem` - -Certificate: `/etc/rancher/ssl/cert.pem` - -CA Certificate(self-signed): `/etc/rancher/ssl/cacerts.pem` - -Additional CA Certificate: `/etc/ssl/certs/ca-additional.pem` - -# Node Taints - -By default, server nodes will be schedulable and thus your workloads can get launched on them. If you wish to have a dedicated control plane where no user workloads will run, you can use taints. The node-taint parameter will allow you to configure nodes with taints. Here is an example of adding a node taint to the `config.yaml`: - -``` -node-taint: - - "CriticalAddonsOnly=true:NoExecute" -``` -# Customizing the RancherD Helm Chart - -Rancher is launched as a [Helm](https://helm.sh/) chart using the cluster’s [Helm integration.](https://docs.rke2.io/helm/) This means that you can easily customize the application through a manifest file describing your custom parameters. - -The RancherD chart provisions Rancher in a daemonset. It exposes hostport `8080/8443` down to the container port (`80/443`), and uses hostpath to mount certs if needed. - -RancherD uses `helm-controller` to bootstrap the RancherD chart. To provide a customized `values.yaml` file, the configuration options must be passed in through the `helm-controller` custom resource definition. - -Here is an example of the manifest: - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChartConfig -metadata: - name: rancher - namespace: kube-system -spec: - valuesContent: | - publicCA: true -``` - -Put this manifest on your host in `/var/lib/rancher/rke2/server/manifests` before running RancherD. - -### Common Options - -| Parameter | Default Value | Description | -| ------------------------------ | ----------------------------------------------------- | -------------------------------------------- | -| `addLocal` | "auto" | ***string*** - Have Rancher detect and import the local Rancher server cluster | -| `auditLog.destination` | "sidecar" | ***string*** - Stream to sidecar container console or hostPath volume - *"sidecar, hostPath"* | -| `auditLog.hostPath` | "/var/log/rancher/audit" | ***string*** - log file destination on host (only applies when **auditLog.destination** is set to **hostPath**) | -| `auditLog.level` | 0 | ***int*** - set the [API Audit Log level](https://rancher.com/docs/rancher/v2.x/en/installation/api-auditing). 0 is off. 
[0-3] | -| `auditLog.maxAge` | 1 | ***int*** - maximum number of days to retain old audit log files (only applies when **auditLog.destination** is set to **hostPath**) | -| `auditLog.maxBackups` | 1 | int - maximum number of audit log files to retain (only applies when **auditLog.destination** is set to **hostPath**) | -| `auditLog.maxSize` | 100 | ***int*** - maximum size in megabytes of the audit log file before it gets rotated (only applies when **auditLog.destination** is set to **hostPath**) | -| `debug` | false | ***bool*** - set debug flag on rancher server | -| `extraEnv` | [] | ***list*** - set additional environment variables for Rancher Note: *Available as of v2.2.0* | -| `imagePullSecrets` | [] | ***list*** - list of names of Secret resource containing private registry credentials | -| `proxy` | " " | ***string** - HTTP[S] proxy server for Rancher | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" | ***string*** - comma separated list of hostnames or ip address not to use the proxy | -| `resources` | {} | ***map*** - rancher pod resource requests & limits | -| `rancherImage` | "rancher/rancher" | ***string*** - rancher image source | -| `rancherImageTag` | same as chart version | ***string*** - rancher/rancher image tag | -| `rancherImagePullPolicy` | "IfNotPresent" | ***string*** - Override imagePullPolicy for rancher server images - *"Always", "Never", "IfNotPresent"* | -| `systemDefaultRegistry` | "" | ***string*** - private registry to be used for all system Docker images, e.g., [http://registry.example.com/] *Available as of v2.3.0* | -| `useBundledSystemChart` | false | ***bool*** - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. *Available as of v2.3.0* | -| `publicCA` | false | ***bool*** - Set to true if your cert is signed by a public CA | - -# RancherD Server CLI Options - -The command to run the Rancher management server is: - -``` -rancherd server [OPTIONS] -``` - -It can be run with the following options: - -### Config - -| Option | Description | -|--------|-------------| -| `--config FILE, -c FILE` | Load configuration from FILE (default: "/etc/rancher/rke2/config.yaml") | - -### Logging - -| Option | Description | -|--------|-------------| -| `--debug` | Turn on debug logs | - -### Listener - -| Option | Description | -|--------|-------------| -| `--bind-address value` | RancherD bind address (default: 0.0.0.0) | -| `--advertise-address value` | IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip) | -| `--tls-san value` | Add additional hostname or IP as a Subject Alternative Name in the TLS cert | - -### Data - -| Option | Description | -|--------|-------------| -| `--data-dir value, -d value` | Folder to hold state (default: "/var/lib/rancher/rancherd") | - -### Networking - -| Option | Description | -|--------|-------------| -| `--cluster-cidr value` | Network CIDR to use for pod IPs (default: "10.42.0.0/16") | -| `--service-cidr value` | Network CIDR to use for services IPs (default: "10.43.0.0/16") | -| `--cluster-dns value` | Cluster IP for coredns service. 
Should be in your service-cidr range (default: 10.43.0.10) | -| `--cluster-domain value` | Cluster Domain (default: "cluster.local") | - -### Cluster - -| Option | Description | -|--------|-------------| -| `--token value, -t value` | Shared secret used to join a server or agent to a cluster | -| `--token-file value` | File containing the cluster-secret/token | - -### Client - -| Option | Description | -|--------|-------------| -| `--write-kubeconfig value, -o value` | Write kubeconfig for admin client to this file | -| `--write-kubeconfig-mode value` | Write kubeconfig with this mode | - -### Flags - -| Option | Description | -|--------|-------------| -| `--kube-apiserver-arg value` | Customized flag for kube-apiserver process | -| `--kube-scheduler-arg value` | Customized flag for kube-scheduler process | -| `--kube-controller-manager-arg value` | Customized flag for kube-controller-manager process | - -### Database - -| Option | Description | -|--------|-------------| -| `--etcd-disable-snapshots` | Disable automatic etcd snapshots | -| `--etcd-snapshot-schedule-cron value` | Snapshot interval time in cron spec. eg. every 5 hours '* */5 * * *' (default: "0 */12 * * *") | -| `--etcd-snapshot-retention value` | Number of snapshots to retain (default: 5) | -| `--etcd-snapshot-dir value` | Directory to save db snapshots. (Default location: ${data-dir}/db/snapshots) | -| `--cluster-reset-restore-path value` | Path to snapshot file to be restored | - -### System Images Registry - -| Option | Description | -|--------|-------------| -| `--system-default-registry value` | Private registry to be used for all system Docker images | - -### Components - -| Option | Description | -|--------|-------------| -| `--disable value` | Do not deploy packaged components and delete any deployed components (valid items: rancherd-canal, rancherd-coredns, rancherd-ingress, rancherd-kube-proxy, rancherd-metrics-server) | - -### Cloud Provider - -| Option | Description | -|--------|-------------| -| `--cloud-provider-name value` | Cloud provider name | -| `--cloud-provider-config value` | Cloud provider configuration file path | - -### Security - -| Option | Description | -|--------|-------------| -| `--profile value` | Validate system configuration against the selected benchmark (valid items: cis-1.5) | - -### Agent Node - -| Option | Description | -|--------|-------------| -| `--node-name value` | Node name | -| `--node-label value` | Registering and starting kubelet with set of labels | -| `--node-taint value` | Registering kubelet with set of taints | -| `--protect-kernel-defaults` | Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults. 
| -| `--selinux` | Enable SELinux in containerd | - -### Agent Runtime - -| Option | Description | -|--------|-------------| -| `--container-runtime-endpoint value` | Disable embedded containerd and use alternative CRI implementation | -| `--snapshotter value` | Override default containerd snapshotter (default: "overlayfs") | -| `--private-registry value` | Private registry configuration file (default: "/etc/rancher/rke2/registries.yaml") | - -### Agent Networking - -| Option | Description | -|--------|-------------| -| `--node-ip value, -i value` | IP address to advertise for node | -| `--resolv-conf value` | Kubelet resolv.conf file | - -### Agent Flags - -| Option | Description | -|--------|-------------| -| `--kubelet-arg value` | Customized flag for kubelet process | -| `--kube-proxy-arg value` | Customized flag for kube-proxy process | - -### Experimental - -| Option | Description | -|--------|-------------| -| `--agent-token value` | Shared secret used to join agents to the cluster, but not servers | -| `--agent-token-file value` | File containing the agent secret | -| `--server value, -s value` | Server to connect to, used to join a cluster | -| `--cluster-reset` | Forget all peers and become sole member of a new cluster | -| `--secrets-encryption` | Enable Secret encryption at rest | - - - -# RancherD Agent CLI Options - -The following command is used to run the RancherD agent: - -``` -rancherd agent [OPTIONS] -``` - -The following options are available. - -### Config - -| Option | Description | -|--------|-------------| -| `--config FILE, -c FILE` | Load configuration from FILE (default: "/etc/rancher/rke2/config.yaml") | - -### Data - -| Option | Description | -|--------|-------------| -| `--data-dir value, -d value` | Folder to hold state (default: "/var/lib/rancher/rancherd") | - -### Logging - -| Option | Description | -|--------|-------------| -| `--debug` | Turn on debug logs | - -### Cluster - -| Option | Description | -|--------|-------------| -| `--token value, -t value` | Token to use for authentication | -| `--token-file value` | Token file to use for authentication | -| `--server value, -s value` | Server to connect to | - -### Agent Node - -| Option | Description | -|--------|-------------| -| `--node-name value` | Node name | -| `--node-label value` | Registering and starting kubelet with set of labels | -| `--node-taint value` | Registering kubelet with set of taints | -| `--selinux` | Enable SELinux in containerd | -| `--protect-kernel-defaults` | Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults. 
| - -### Agent Runtime - -| Option | Description | -|--------|-------------| -| `--container-runtime-endpoint value` | Disable embedded containerd and use alternative CRI implementation | -| `--snapshotter value` | Override default containerd snapshotter (default: "overlayfs") | -| `--private-registry value` | Private registry configuration file (default: "/etc/rancher/rke2/registries.yaml") | - -### Agent Networking - -| Option | Description | -|--------|-------------| -| `--node-ip value, -i value` | IP address to advertise for node | -| `--resolv-conf value` | Kubelet resolv.conf file | - -### Agent Flags - -| Option | Description | -|--------|-------------| -| `--kubelet-arg value` | Customized flag for kubelet process | -| `--kube-proxy-arg value` | Customized flag for kube-proxy process | - -### System Images Registry - -| Option | Description | -|--------|-------------| -| `--system-default-registry value` | Private registry to be used for all system Docker images | - -### Cloud Provider - -| Option | Description | -|--------|-------------| -| `--cloud-provider-name value` | Cloud provider name | -| `--cloud-provider-config value` | Cloud provider configuration file path | - -### Security - -| Option | Description | -|--------|-------------| -| `--profile value` | Validate system configuration against the selected benchmark (valid items: cis-1.5) | \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-linux/rollbacks/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-linux/rollbacks/_index.md deleted file mode 100644 index 20870c06e..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-linux/rollbacks/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Rollbacks -weight: 3 ---- - -> RancherD is an experimental feature. - -To roll back Rancher to a previous version, re-run the installation script with the previous version specified in the `INSTALL_RANCHERD_VERSION` environment variable. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/_index.md deleted file mode 100644 index 623576dc0..000000000 --- a/content/rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Upgrades -weight: 2 ---- - -> RancherD is an experimental feature. - -When RancherD is upgraded, the Rancher Helm controller and the Fleet pods are upgraded. - -During a RancherD upgrade, there is very little downtime, but it is possible that RKE2 may be down for a minute, during which you could lose access to Rancher. - -When Rancher is installed with RancherD, the underlying Kubernetes cluster can't be upgraded from the Rancher UI. It needs to be upgraded using the RancherD CLI. - -### Upgrading the Rancher Helm Chart without Upgrading the Underlying Cluster - -To upgrade Rancher without upgrading the underlying Kubernetes cluster, follow these steps. - -> Before upgrading, we recommend that you should: -> -> - Create a backup of the Rancher server using the [backup application.]({{}}/rancher/v2.x/en/backups/v2.5/back-up-rancher/) -> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -1. 
Uninstall the chart with Helm: - - ``` - helm uninstall rancher - ``` - -2. Reinstall the Rancher chart with Helm. To install a specific Rancher version, use the `--version` flag. For example: - - ``` - helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --version 2.5.1 - ``` - -**Result:** Rancher is upgraded to the new version. - -If necessary, restore Rancher from backup by following [these steps.]({{}}/rancher/v2.x/en/backups/v2.5/restoring-rancher/) - -### Upgrading Both Rancher and the Underlying Cluster - -Upgrade both RancherD and the underlying Kubernetes cluster by re-running the RancherD installation script. - -> Before upgrading, we recommend that you should: -> -> - Create a backup of the Rancher server using the [backup application.]({{}}/rancher/v2.x/en/backups/v2.5/back-up-rancher/) -> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -``` -sudo curl -sfL https://get.rancher.io | sudo sh - -``` - -To specify a specific version to upgrade to, use `INSTALL_RANCHERD_VERSION` environment variable: - -``` -curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.1 sh - -``` - -Then launch the server: - -``` -systemctl enable rancherd-server -systemctl start rancherd-server -``` - -The upgrade can also be performed by manually installing the binary of the desired version. - - diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/_index.md deleted file mode 100644 index b3cefbf06..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Other Installation Methods -weight: 3 ---- - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -The Docker installation is for development and testing environments only. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -- For Rancher v2.0-v2.4, there was no migration path from a Docker installation to a high-availability installation. Therefore, if you are using Rancher before v2.5, you may want to use a Kubernetes installation from the start. - -- For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. 
For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md deleted file mode 100644 index 66ec384fa..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Air Gapped Helm CLI Install -weight: 1 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/ - - /rancher/v2.x/en/installation/air-gap-high-availability/ - - /rancher/v2.x/en/installation/air-gap-single-node/ ---- - -This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. - -For more information on each installation option, refer to [this page.]({{}}/rancher/v2.x/en/installation/) - -Throughout the installation instructions, there will be _tabs_ for each installation option. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -# Installation Outline - -1. [Set up infrastructure and private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) -2. [Collect and publish images to your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) -3. [Set up a Kubernetes cluster (Skip this step for Docker installations)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -4. [Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) - -# Upgrades - -To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md deleted file mode 100644 index 259b63095..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ /dev/null @@ -1,370 +0,0 @@ ---- -title: 4. Install Rancher -weight: 400 -aliases: - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/ - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.x/en/installation/air-gap-single-node/install-rancher - - /rancher/v2.x/en/installation/air-gap/install-rancher ---- - -This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. 
- -### Privileged Access for Rancher v2.5+ - -When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher in five parts: - -- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) -- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) -- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) -- [4. Install Rancher](#4-install-rancher) -- [5. For Rancher versions before v2.3.0, Configure System Charts](#5-for-rancher-versions-before-v2-3-0-configure-system-charts) - -# 1. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. - ```plain - helm fetch rancher-/rancher - ``` - - If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: - ```plain - helm fetch rancher-stable/rancher --version=v2.4.8 - ``` - -# 2. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -# 3. Render the Rancher Helm Template - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. - -### Option A: Default Self-Signed Certificate - -{{% accordion id="k8s-1" label="Click to expand" %}} - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -1. From a system connected to the internet, add the cert-manager repo to Helm. - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v1.0.4 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - ```plain - helm template cert-manager ./cert-manager-v1.0.4.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml - ``` - -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. 
- - ```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -{{% /accordion %}} - -### Option B: Certificates From Files using Kubernetes Secrets - -{{% accordion id="k8s-2" label="Click to expand" %}} - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -{{% /accordion %}} - -# 4. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -### For Self-Signed Certificate Installs, Install Cert-manager - -{{% accordion id="install-cert-manager" label="Click to expand" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). 
-```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -### Install Rancher with kubectl - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` -**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.x/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -# 5. For Rancher versions before v2.3.0, Configure System Charts - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/resources/local-system-charts/). - -# Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users who want to test out Rancher. - -Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** For Rancher v2.0-v2.4, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -For Rancher v2.5+, the backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. 
The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). - -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.x/en/installation/resources/local-system-charts/) - -Choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to install. | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to install. | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` - -{{% /accordion %}} - -If you are installing Rancher v2.3.0+, the installation is complete. 
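Once the container starts, a quick sanity check with standard Docker commands can confirm it is healthy before you log in. This is illustrative only; the container ID shown by `docker ps` will differ on your host:

```
# Confirm the Rancher container is running and watch its startup logs
docker ps
docker logs -f <container-id>    # replace with the ID or name from docker ps

# The Rancher UI should respond over HTTPS on the host
# (-k skips certificate verification for self-signed or private CA certs)
curl -sk https://localhost/ >/dev/null && echo "Rancher is answering on port 443"
```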
- -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.x/en/faq/telemetry/) during the initial login. - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/resources/local-system-charts/). - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md deleted file mode 100644 index 4fa8359ec..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: '3. Install Kubernetes (Skip for Docker Installs)' -weight: 300 -aliases: - - /rancher/v2.x/en/installation/air-gap-high-availability/install-kube ---- - -> Skip this section if you are installing Rancher on a single node with Docker. - -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. - -For Rancher before v2.4, Rancher should be installed on an [RKE]({{}}/rke/latest/en/) (Rancher Kubernetes Engine) Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. - -In Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. The Rancher management server can only be run on a Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. - -The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. - -{{% tabs %}} -{{% tab "K3s" %}} - -In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. - -### Installation Outline - -1. [Prepare Images Directory](#1-prepare-images-directory) -2. [Create Registry YAML](#2-create-registry-yaml) -3. [Install K3s](#3-install-k3s) -4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) - -### 1. Prepare Images Directory -Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. - -Place the tar file in the `images` directory before starting K3s on each node, for example: - -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` - -### 2. 
Create Registry YAML -Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. - -The registries.yaml file should look like this before plugging in the necessary information: - -``` ---- -mirrors: - customreg: - endpoint: - - "https://ip-to-server:5000" -configs: - customreg: - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: - key_file: - ca_file: -``` - -Note, at this time only secure registries are supported with K3s (SSL with custom CA). - -For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) - -### 3. Install K3s - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. - -Also obtain the K3s install script at https://get.k3s.io - -Place the binary in `/usr/local/bin` on each node. -Place the install script anywhere on each node, and name it `install.sh`. - -Install K3s on each server: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh -``` - -Install K3s on each agent: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh -``` - -Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. -The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` - ->**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -### 4. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. 
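As a quick sanity check (a suggestion, not part of the official steps), you can point `kubectl` at the copied file and confirm that the server nodes respond:

```
export KUBECONFIG=~/.kube/config/k3s.yaml
kubectl get nodes
```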
If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### Note on Upgrading - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. -2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. -3. Restart the K3s service (if not restarted automatically by installer). -{{% /tab %}} -{{% tab "RKE" %}} -We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. - -### 1. Install RKE - -Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) - -### 2. Create an RKE Config File - -From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. - -This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts) you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -
RKE Options
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | -| `user` | ✓ | A user that can run Docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### 3. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### 4. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ -{{% /tab %}} -{{% /tabs %}} - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md deleted file mode 100644 index 9231581f3..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 -aliases: - - /rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/ - - /rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/ - - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ ---- - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. - -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. - -> **Prerequisites:** -> -> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. -> -> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) -2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) -3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) -4. 
[Populate the private registry](#4-populate-the-private-registry) - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### 1. Find the required assets for your Rancher version - -1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### 2. Collect the cert-manager image - -> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v1.0.4 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### 4. Populate the private registry - -Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. 
The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -_Available as of v2.3.0_ - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -# Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -1. Find the required assets for your Rancher version -2. Save the images to your Windows Server workstation -3. Prepare the Docker daemon -4. Populate the private registry - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|----------------------------|------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - ```plain - ./rancher-save-images.ps1 - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - - - -### 3. Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... 
- "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - - - -### 4. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -# Linux Steps - -The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. - -1. Find the required assets for your Rancher version -2. Collect all the required images -3. Save the images to your Linux workstation -4. Populate the private registry - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -|----------------------------| -------------------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. 
If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - - - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - -**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - - - -### 4. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. - -The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - -```plain -docker login -``` - -1. Make `rancher-load-images.sh` an executable: - -``` -chmod +x rancher-load-images.sh -``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - -```plain -./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry -``` - - -{{% /tab %}} -{{% /tabs %}} - -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md deleted file mode 100644 index 3af1d1472..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: '1. Set up Infrastructure and Private Registry' -weight: 100 -aliases: - - /rancher/v2.x/en/installation/air-gap-single-node/provision-host ---- - -In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). - -An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. - -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. 
For more information on each installation option, refer to [this page.]({{}}/rancher/v2.x/en/installation/) - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. - -{{% tabs %}} -{{% tab "K3s" %}} -We recommend setting up the following infrastructure for a high-availability installation: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set up one of the following external databases: - -* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) -* [MySQL](https://www.mysql.com/) (certified against version 5.7) -* [etcd](https://etcd.io/) (certified against version 3.3.15) - -When you install Kubernetes, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/rds) for setting up a MySQL database on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. 
We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 5. Set up a Private Docker Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. 
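For illustration only, a private registry of this kind can be as small as Docker's `registry:2` image served over TLS. The host path and certificate file names below are placeholders, not values Rancher requires:

```
docker run -d --restart=always --name private-registry \
  -p 443:443 \
  -v /opt/registry/certs:/certs \
  -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
  registry:2
```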
- -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) -{{% /tab %}} -{{% tab "RKE" %}} - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. 
For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 4. Set up a Private Docker Registry - -Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) - -{{% /tab %}} -{{% tab "Docker" %}} -> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. -> -> For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -> For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. 
For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) - -### 1. Set up a Linux Node - -This host will be disconnected from the Internet, but needs to be able to connect to your private registry. - -Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up a Private Docker Registry - -Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/_index.md deleted file mode 100644 index 88670561b..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Installing Rancher behind an HTTP Proxy -weight: 4 ---- - -In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. - -Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/). - -# Installation Outline - -1. [Set up infrastructure]({{}}/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/) -2. [Set up a Kubernetes cluster]({{}}/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) -3. [Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md deleted file mode 100644 index cfec9eaa3..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: 3. Install Rancher -weight: 300 ---- - -Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). 
In a real-world use case, you will likely use Let's Encrypt or provide your own certificate. - -> **Note:** These installation instructions assume you are using Helm 3. - -### Install cert-manager - -Add the cert-manager helm repository: - -``` -helm repo add jetstack https://charts.jetstack.io -``` - -Create a namespace for cert-manager: - -``` -kubectl create namespace cert-manager -``` - -Install the CustomResourceDefinitions of cert-manager: - -``` -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.2/cert-manager.crds.yaml -``` - -And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers: - -``` -helm upgrade --install cert-manager jetstack/cert-manager \ - --namespace cert-manager --version v0.15.2 \ - --set http_proxy=http://${proxy_host} \ - --set https_proxy=http://${proxy_host} \ - --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local -``` - -Now wait until cert-manager has finished starting up: - -``` -kubectl rollout status deployment -n cert-manager cert-manager -kubectl rollout status deployment -n cert-manager cert-manager-webhook -``` - -### Install Rancher - -Next, you can install Rancher itself. First add the helm repository: - -``` -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest -``` - -Create a namespace: - -``` -kubectl create namespace cattle-system -``` - -And install Rancher with Helm. Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata: - -``` -helm upgrade --install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.example.com \ - --set proxy=http://${proxy_host} \ - --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local -``` - -Wait for the deployment to finish: - -``` -kubectl rollout status deployment -n cattle-system rancher -``` - -You can now navigate to `https://rancher.example.com` and start using Rancher. - -> **Note:** If you don't intend to send telemetry data, opt out of [telemetry]({{}}/rancher/v2.x/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md deleted file mode 100644 index 0393ea433..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: '2. Install Kubernetes' -weight: 200 --- - -Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in.
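Before installing anything on the nodes, it can help to confirm that each of them can reach external endpoints through the proxy. This is an optional spot check; the proxy address shown is the example value used in the steps below:

```
# Example proxy address, adjust for your environment
export proxy_host="10.0.0.5:8888"
curl -I -x http://${proxy_host} https://releases.rancher.com
```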
- -### Installing Docker - -First, you have to install Docker and set up the HTTP proxy on all three Linux nodes. To do so, perform the following steps on all three nodes. - -For convenience, export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell: - -``` -export proxy_host="10.0.0.5:8888" -export HTTP_PROXY=http://${proxy_host} -export HTTPS_PROXY=http://${proxy_host} -export NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,cattle-system.svc -``` - -Next, configure apt to use this proxy when installing packages. Note that the heredoc delimiter must not be quoted, so that `${proxy_host}` is expanded when the file is written. If you are not using Ubuntu, you have to adapt this step accordingly: - -``` -cat <<EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null -Acquire::http::Proxy "http://${proxy_host}/"; -Acquire::https::Proxy "http://${proxy_host}/"; -EOF -``` - -Now you can install Docker: - -``` -curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh -``` - -Then ensure that your current user is able to access the Docker daemon without sudo: - -``` -sudo usermod -aG docker YOUR_USERNAME -``` - -And configure the Docker daemon to use the proxy to pull images: - -``` -sudo mkdir -p /etc/systemd/system/docker.service.d -cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null -[Service] -Environment="HTTP_PROXY=http://${proxy_host}" -Environment="HTTPS_PROXY=http://${proxy_host}" -Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" -EOF -``` - -To apply the configuration, restart the Docker daemon: - -``` -sudo systemctl daemon-reload -sudo systemctl restart docker -``` - -### Creating the RKE Cluster - -You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: - -* [RKE CLI binary]({{}}/rke/latest/en/installation/#download-the-rke-binary) - -``` -sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 -sudo chmod +x /usr/local/bin/rke -``` - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -``` -curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" -chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -``` - -* [helm](https://helm.sh/docs/intro/install/) - -``` -curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -chmod +x get_helm.sh -sudo ./get_helm.sh -``` - -Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -``` -nodes: - - address: 10.0.1.200 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.201 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.202 - user: ubuntu - role: [controlplane,worker,etcd] - -services: - etcd: - backup_config: - interval_hours: 12 - retention: 6 -``` - -After that, you can create the Kubernetes cluster by running: - -``` -rke up --config rancher-cluster.yaml -``` - -RKE creates a state file called `rancher-cluster.rkestate`, which is needed if you want to perform updates, modify your cluster configuration or restore it from a backup.
It also creates a `kube_config_rancher-cluster.yaml` file that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system. - -To have a look at your cluster, run: - -``` -export KUBECONFIG=kube_config_rancher-cluster.yaml -kubectl cluster-info -kubectl get pods --all-namespaces -``` - -You can also verify that your external load balancer works, and the DNS entry is set up correctly. If you send a request to either, you should receive an HTTP 404 response from the ingress controller: - -``` -$ curl 10.0.1.100 -default backend - 404 -$ curl rancher.example.com -default backend - 404 -``` - -### Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yaml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yaml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md deleted file mode 100644 index 2db3543c6..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: '1. Set up Infrastructure' -weight: 100 --- - -In this section, you will provision the underlying infrastructure for your Rancher management server with internet access through an HTTP proxy. - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes.
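Once the cluster has been created in a later step, you can spot-check that all three etcd members are present and healthy. This is an optional check, assuming the default `etcd` container name that RKE uses on each node:

```
docker exec etcd etcdctl member list
docker exec etcd etcdctl endpoint health
```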
- -### 1. Set up Linux Nodes - -These hosts will connect to the internet through an HTTP proxy. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. 
Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - - -### [Next: Set up a Kubernetes cluster]({{}}/rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md deleted file mode 100644 index 9b0753296..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Installing Rancher on a Single Node Using Docker -description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. -weight: 2 -aliases: - - /rancher/v2.x/en/installation/single-node-install/ - - /rancher/v2.x/en/installation/single-node - - /rancher/v2.x/en/installation/other-installation-methods/single-node ---- - -Rancher can be installed by running a single Docker container. - -In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. - -> **Want to use an external load balancer?** -> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/single-node-install-external-lb) instead. - -A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -- For Rancher v2.0-v2.4, there was no migration path from a Docker installation to a high-availability installation. Therefore, if you are using Rancher before v2.5, you may want to use a Kubernetes installation from the start. - -- For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) - -### Privileged Access for Rancher v2.5+ - -When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. - -# Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -# 1. 
Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your Rancher server. - -# 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) -> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) -> - Record all transactions with the Rancher API? See [API Auditing](./advanced/#api-audit-log) - -Choose from the following options: - -- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate) -- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed) -- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca) -- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate) - -### Option A: Default Rancher-generated Self-signed Certificate - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the minimum installation command below. - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:latest -``` - -### Option B: Bring Your Own Certificate, Self-signed -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| ------------------- | --------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. 
| - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - --privileged \ - rancher/rancher:latest -``` - -### Option C: Bring Your Own Certificate, Signed by a Recognized CA - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After obtaining your certificate, run the Docker command below. - -- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. -- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -| Placeholder | Description | -| ------------------- | ----------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - --privileged \ - rancher/rancher:latest \ - --no-cacerts -``` - -### Option D: Let's Encrypt Certificate - -> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. - -> **Prerequisites:** -> -> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. -> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). -> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. 
- -| Placeholder | Description | -| ----------------- | ------------------- | -| `` | Your domain address | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:latest \ - --acme-domain -``` - -## Advanced Options - -When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: - -- Custom CA Certificate -- API Audit Log -- TLS Settings -- Air Gap -- Persistent Data -- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -Refer to [this page](./advanced) for details. - -## Troubleshooting - -Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. - -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md deleted file mode 100644 index 3642b5959..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Advanced Options for Docker Installs -weight: 5 ---- - -When installing Rancher, there are several [advanced options]({{}}/rancher/v2.x/en/installation/options/) that can be enabled: - -- [Custom CA Certificate](#custom-ca-certificate) -- [API Audit Log](#api-audit-log) -- [TLS Settings](#tls-settings) -- [Air Gap](#air-gap) -- [Persistent Data](#persistent-data) -- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) - -### Custom CA Certificate - -If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. - -Use the command example to start a Rancher container with your private CA certificates mounted. - -- The volume flag (`-v`) should specify the host directory containing the CA root certificates. -- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. -- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. -- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. - -The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. 
- -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /host/certs:/container/certs \ - -e SSL_CERT_DIR="/container/certs" \ - --privileged \ - rancher/rancher:latest -``` - -### API Audit Log - -The API Audit Log records all the user and system transactions made through Rancher server. - -The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. - -See [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) for more information and options. - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /var/log/rancher/auditlog:/var/log/auditlog \ - -e AUDIT_LEVEL=1 \ - --privileged \ - rancher/rancher:latest -``` - -### TLS settings - -_Available as of v2.1.7_ - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_TLS_MIN_VERSION="1.0" \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. - -### Air Gap - -If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. - -If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. - -Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. 
- -To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: - -``` -docker run -d --restart=unless-stopped \ - -p 8080:80 -p 8443:443 \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md deleted file mode 100644 index d4d60519e..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: HTTP Proxy Configuration -weight: 251 -aliases: - - /rancher/v2.x/en/installation/proxy-configuration/ - - /rancher/v2.x/en/installation/single-node/proxy ---- - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher with information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. - -Make sure `NO_PROXY` contains the network addresses, network address ranges, and domains that should be excluded from using the proxy. - -| Environment variable | Purpose | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------ | -| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | -| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | -| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | - -> **Note:** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Docker Installation - -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node-install/) are: - -- `localhost` -- `127.0.0.1` -- `0.0.0.0` -- `10.0.0.0/8` -- `cattle-system.svc` -- `.svc` -- `.cluster.local` - -The example below is based on a proxy server accessible at `http://192.168.10.1:3128`, and excludes use of the proxy when accessing the network range `192.168.10.0/24` and every hostname under the domain `example.com`. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e HTTP_PROXY="http://192.168.10.1:3128" \ - -e HTTPS_PROXY="http://192.168.10.1:3128" \ - -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md deleted file mode 100644 index 589c8781f..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Rolling Back Rancher Installed with Docker -weight: 1015 -aliases: - - /rancher/v2.x/en/upgrades/single-node-rollbacks - - /rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks ---- - -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade). Rolling back restores: - -- Your previous version of Rancher. -- Your data backup created before upgrade. - -## Before You Start - -During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker pull rancher/rancher: -``` - -In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | ------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that the backup is for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
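If you prefer not to scan the full `docker ps` output shown in the screenshot above, the same columns can be printed directly with Docker's `--format` flag. This is purely illustrative and not required for the rollback:

```
# Illustrative only: show just the image tag, container name, and creation time columns.
docker ps --format 'table {{.Image}}\t{{.Names}}\t{{.CreatedAt}}'
```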
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help at any time while creating backups. - -## Rolling Back Rancher - -If you have issues upgrading Rancher, roll it back to its last known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. - ->**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. - - For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. - - ``` - docker pull rancher/rancher: - ``` - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - You can obtain the name for your Rancher container by entering `docker ps`. - -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the [placeholder](#before-you-start). Don't forget to close the quotes. - - ``` - docker run --volumes-from rancher-data \ - -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ - && tar zxvf /backup/rancher-data-backup--.tar.gz" - ``` - -1. Start a new Rancher Server container with the `` tag [placeholder](#before-you-start) pointing to the data container. - ``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: - ``` - As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - - >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. - -**Result:** Rancher is rolled back to the version and data state it had before the upgrade. 
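As a worked illustration only, here is the rollback sequence above with the example values from the placeholder table filled in (container `festive_mestorf`, prior version `v2.0.5`, backup dated `9-27-18`). Substitute the values from your own environment:

```
# Example values only -- replace with your own container name, version, and backup date.
docker pull rancher/rancher:v2.0.5        # the version you ran before the upgrade
docker stop festive_mestorf               # the container currently running Rancher
docker run --volumes-from rancher-data \
  -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \
  && tar zxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:v2.0.5
```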
diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md deleted file mode 100644 index c92de6a54..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: Upgrading Rancher Installed with Docker -weight: 1010 -aliases: - - /rancher/v2.x/en/upgrades/single-node-upgrade/ - - /rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade - - /rancher/v2.x/en/upgrades/upgrades/single-node - - /rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/ - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/ ---- - -The following instructions will guide you through upgrading a Rancher server that was installed with Docker. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. -- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Placeholder Review - -During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). - -Here's an **example** of a command with a placeholder: - -``` -docker stop -``` - -In this command, `` is the name of your Rancher container. - -# Get Data for Upgrade Commands - -To obtain the data to replace the placeholders, run: - -``` -docker ps -``` - -Write down or copy this information before starting the upgrade. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | -| `` | `2018-12-19` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -# Upgrade Outline - -During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: - -- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) -- [2. Create a backup tarball](#2-create-a-backup-tarball) -- [3. Pull the new Docker image](#3-pull-the-new-docker-image) -- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) -- [5. Verify the Upgrade](#5-verify-the-upgrade) -- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) - -# 1. Create a copy of the data from your Rancher server container - -1. Using a remote Terminal connection, log into the node running your Rancher server. - -1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data rancher/rancher: - ``` - -# 2. Create a backup tarball - -1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). - - This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. - - - ``` - docker run --volumes-from rancher-data -v $PWD:/backup busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** When you enter this command, a series of commands should run. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - - ``` - [rancher@ip-10-0-0-50 ~]$ ls - rancher-data-backup-v2.1.3-20181219.tar.gz - ``` - -1. Move your backup tarball to a safe location external from your Rancher server. - -# 3. Pull the New Docker Image - -Pull the image of the Rancher version that you want to upgrade to. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker pull rancher/rancher: -``` - -# 4. Start the New Rancher Server Container - -Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. - ->**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
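To make the placeholders concrete before starting the new container, steps 1 and 2 above might look like the following with the example values from the table filled in (container `festive_mestorf`, image tag `v2.1.3`). The tarball name matches the sample `ls` output shown earlier, and the target version in the final `docker pull` is purely hypothetical:

```
# Example values only -- substitute your own container name, tags, and date.
docker stop festive_mestorf
docker create --volumes-from festive_mestorf --name rancher-data rancher/rancher:v2.1.3
docker run --volumes-from rancher-data -v $PWD:/backup busybox \
  tar zcvf /backup/rancher-data-backup-v2.1.3-20181219.tar.gz /var/lib/rancher
docker pull rancher/rancher:v2.4.8   # hypothetical version to upgrade to
```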
- -If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) - -If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -To see the command to use when starting the new Rancher server container, choose from the following options: - -- Docker Upgrade -- Docker Upgrade for Air Gap Installs - -{{% tabs %}} -{{% tab "Docker Upgrade" %}} - -Select which option you had installed Rancher server - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -Placeholder | Description -------------|------------- - `` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. 
- -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - --privileged \ - rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - --privileged \ - rancher/rancher: \ - --no-cacerts -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option D: Let's Encrypt Certificate - -{{% accordion id="option-d" label="Click to expand" %}} - ->**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. - ->**Reminder of the Cert Prerequisites:** -> ->- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). ->- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. 
-`` | The domain address that you originally started with - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: \ - --acme-domain -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -{{% /tab %}} -{{% tab "Docker Air Gap Upgrade" %}} - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, such as when you log in or interact with a cluster. - -> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.x/en/installation/resources/local-system-charts/) - -When starting the new Rancher server container, choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you chose to use the Rancher-generated self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. - -``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Available as of v2.3.0, use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you chose to bring your own self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container, and make sure you have access to the same certificate that you originally installed with. - ->**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. - - >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/resources/chart-options/) that you want to upgrade to. - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} -{{% /tab %}} -{{% /tabs %}} - -**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. - -# 5. Verify the Upgrade - -Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. - ->**Having network issues in your user clusters following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - - -# 6. Clean up Your Old Rancher Server Container - -Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. - -# Rolling Back - -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. 
For more information, see [Docker Rollback]({{}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md deleted file mode 100644 index 9a1fc02ee..000000000 --- a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Certificate Troubleshooting -weight: 4 ---- -### How Do I Know if My Certificates are in PEM Format? - -You can recognize the PEM format by the following traits: - -- The file begins with the following header: - ``` - -----BEGIN CERTIFICATE----- - ``` -- The header is followed by a long string of characters. -- The file ends with a footer: - -----END CERTIFICATE----- - -PEM Certificate Example: - -``` -----BEGIN CERTIFICATE----- -MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV -... more lines -VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== ------END CERTIFICATE----- -``` - -PEM Certificate Key Example: - -``` ------BEGIN RSA PRIVATE KEY----- -MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV -... more lines -VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== ------END RSA PRIVATE KEY----- -``` - -If your key looks like the example below, see [Converting a Certificate Key From PKCS8 to PKCS1.](#converting-a-certificate-key-from-pkcs8-to-pkcs1) - -``` ------BEGIN PRIVATE KEY----- -MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV -... more lines -VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== ------END PRIVATE KEY----- -``` - -### Converting a Certificate Key From PKCS8 to PKCS1 - -If you are using a PKCS8 certificate key file, Rancher will log the following line: - -``` -ListenConfigController cli-config [listener] failed with : failed to read private key: asn1: structure error: tags don't match (2 vs {class:0 tag:16 length:13 isCompound:true}) -``` - -To make this work, you will need to convert the key from PKCS8 to PKCS1 using the command below: - -``` -openssl rsa -in key.pem -out convertedkey.pem -``` - -You can now use `convertedkey.pem` as certificate key file for Rancher. - -### What is the Order of Certificates if I Want to Add My Intermediate(s)? - -The order of adding certificates is as follows: - -``` ------BEGIN CERTIFICATE----- -%YOUR_CERTIFICATE% ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -%YOUR_INTERMEDIATE_CERTIFICATE% ------END CERTIFICATE----- -``` - -### How Do I Validate My Certificate Chain? - -You can validate the certificate chain by using the `openssl` binary. If the output of the command (see the command example below) ends with `Verify return code: 0 (ok)`, your certificate chain is valid. The `ca.pem` file must be the same as you added to the `rancher/rancher` container. - -When using a certificate signed by a recognized Certificate Authority, you can omit the `-CAfile` parameter. - -Command: - -``` -openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443 -... 
- Verify return code: 0 (ok) -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/requirements/_index.md b/content/rancher/v2.x/en/installation/requirements/_index.md deleted file mode 100644 index a2e4038ca..000000000 --- a/content/rancher/v2.x/en/installation/requirements/_index.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Installation Requirements -description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup -weight: 1 ---- - -This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. - -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) which will run your apps and services. - -Make sure the node(s) for the Rancher server fulfill the following requirements: - -- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) -- [CPU and Memory](#cpu-and-memory) - - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) - - [K3s Kubernetes](#k3s-kubernetes) - - [RancherD](#rancherd) - - [RKE2 Kubernetes](#rke2-kubernetes) - - [CPU and Memory for Rancher before v2.4.0](#cpu-and-memory-for-rancher-before-v2-4-0) -- [Ingress](#ingress) -- [Disks](#disks) -- [Networking Requirements](#networking-requirements) - - [Node IP Addresses](#node-ip-addresses) - - [Port Requirements](#port-requirements) - -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices/deployment-types/) - -The Rancher UI works best in Firefox or Chrome. - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution. - -Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for RancherD or RKE2 Kubernetes installs. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. - -Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/) - -### RKE Specific Requirements - -For the container runtime, RKE should work with any modern Docker version. - -### K3s Specific Requirements - -For the container runtime, K3s should work with any modern version of Docker or containerd. 
- -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. - -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. - -### RancherD Specific Requirements - -_The RancherD install is available as of v2.5.4. It is an experimental feature._ - -At this time, only Linux OSes that leverage systemd are supported. - -To install RancherD on SELinux Enforcing CentOS 8 or RHEL 8 nodes, some [additional steps](#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required. - -Docker is not required for RancherD installs. - -### RKE2 Specific Requirements - -_The RKE2 install is available as of v2.5.6._ - -For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -Docker is not required for RKE2 installs. - -The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke2/#5-configure-nginx-to-be-a-daemonset) - -### Installing Docker - -Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.x/en/installation/requirements/installing-docker) to install Docker with one command. - -Docker is not required for RancherD installs. -# Hardware Requirements - -The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. - -# CPU and Memory - -Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. - -### RKE and Hosted Kubernetes - -These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. - -These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. - -Performance increased in Rancher v2.4.0. For the requirements of Rancher before v2.4.0, refer to [this section.](#cpu-and-memory-for-rancher-before-v2-4-0) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | ---------- | ------------ | -------| ------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | - -[Contact Rancher](https://rancher.com/contact/) for more than 2000 clusters and/or 20,000 nodes. 
- -### K3s Kubernetes - -These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | -| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | - -[Contact Rancher](https://rancher.com/contact/) for more than 2000 clusters and/or 20,000 nodes. - -### RancherD - -_RancherD is available as of v2.5.4. It is an experimental feature._ - -These CPU and memory requirements apply to each instance with RancherD installed. Minimum recommendations are outlined here. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 2 | 5 GB | -| Medium | Up to 15 | Up to 200 | 3 | 9 GB | - -### RKE2 Kubernetes - -These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 2 | 5 GB | -| Medium | Up to 15 | Up to 200 | 3 | 9 GB | - -### Docker - -These CPU and memory requirements apply to a host with a [single-node]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) installation of Rancher. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 1 | 4 GB | -| Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -### CPU and Memory for Rancher before v2.4.0 - -{{% accordion label="Click to expand" %}} -These CPU and memory requirements apply to installing Rancher on an RKE Kubernetes cluster before Rancher v2.4.0: - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | -| Small | Up to 5 | Up to 50 | 2 | 8 GB | -| Medium | Up to 15 | Up to 200 | 4 | 16 GB | -| Large | Up to 50 | Up to 500 | 8 | 32 GB | -| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | -| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | -{{% /accordion %}} - -# Ingress - -Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. - -The Ingress should be deployed as a DaemonSet to ensure your load balancer can successfully route traffic to all nodes. - -For RKE, K3s, and RancherD installations, you don't have to install the Ingress manually because it is installed by default. - -For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the Ingress. 
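One way to sanity-check this, shown here only as an illustration since the namespace and controller name vary by Kubernetes distribution and ingress controller, is to confirm that the controller runs as a DaemonSet with a pod on every node:

```
# Illustrative check -- adjust the grep pattern to your ingress controller's name.
kubectl get daemonset --all-namespaces | grep -i ingress
kubectl get pods --all-namespaces -o wide | grep -i ingress
```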
- -### Ingress for RKE2 - -Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke2/#5-configure-nginx-to-be-a-daemonset) - -### Ingress for EKS -For an example of how to deploy an nginx-ingress-controller with a LoadBalancer service, refer to [this section.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/#5-install-an-ingress) - -# Disks - -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. - -# Networking Requirements - -This section describes the networking requirements for the node(s) where the Rancher server is installed. - -### Node IP Addresses - -Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -### Port Requirements - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.x/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. - -# RancherD on SELinux Enforcing CentOS 8 or RHEL 8 Nodes - -Before installing Rancher on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, you must install `container-selinux` and `iptables`: - -``` -sudo yum install iptables -sudo yum install container-selinux -``` diff --git a/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md b/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md deleted file mode 100644 index 02a005d24..000000000 --- a/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Installing Docker -weight: 1 ---- - -For Helm CLI installs, Docker is required to be installed on any node that runs the Rancher server. - -There are a couple of options for installing Docker. One option is to refer to the [official Docker documentation](https://docs.docker.com/install/) about how to install Docker on Linux. The steps will vary based on the Linux distribution. - -Another option is to use one of Rancher's Docker installation scripts, which are available for most recent versions of Docker. - -For example, this command could be used to install Docker 20.10 on Ubuntu: - -``` -curl https://releases.rancher.com/install-docker/20.10.sh | sh -``` - -Rancher has installation scripts for every version of upstream Docker that Kubernetes supports. To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. 
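After one of these scripts completes, a quick sanity check along the following lines can confirm that Docker is installed and running; the `usermod` step is optional and assumes you want to run `docker` without `sudo`:

```
docker --version                    # confirm the expected Docker version is installed
sudo systemctl enable --now docker  # make sure the daemon is running and starts on boot (systemd hosts)
sudo usermod -aG docker $USER       # optional: run docker without sudo (log out and back in afterwards)
```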
diff --git a/content/rancher/v2.x/en/installation/requirements/ports/_index.md b/content/rancher/v2.x/en/installation/requirements/ports/_index.md deleted file mode 100644 index d98190dc1..000000000 --- a/content/rancher/v2.x/en/installation/requirements/ports/_index.md +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: Port Requirements -description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes -weight: 300 -aliases: - - /rancher/v2.5/en/installation/references ---- - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. - -- [Rancher Nodes](#rancher-nodes) - - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) - - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) - - [Ports for Rancher Server Nodes on RancherD or RKE2](#ports-for-rancher-server-nodes-on-rancherd-or-rke2) - - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) -- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) - - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) - - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) - - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) - - [Ports for Registered Clusters](#ports-for-registered-clusters) -- [Other Port Considerations](#other-port-considerations) - - [Commonly Used Ports](#commonly-used-ports) - - [Local Node Traffic](#local-node-traffic) - - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) - - [Opening SUSE Linux Ports](#opening-suse-linux-ports) - -# Rancher Nodes - -The following table lists the ports that need to be open to and from nodes that are running the Rancher server. - -The port requirements differ based on the Rancher server architecture. - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. - -> **Notes:** -> -> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -> - Kubernetes recommends TCP 30000-32767 for node port services. -> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. - -### Ports for Rancher Server Nodes on K3s - -{{% accordion label="Click to expand" %}} - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. 
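If a host firewall is running on the K3s server nodes, the ports called out above have to be allowed explicitly. The following is a minimal sketch assuming firewalld is in use; adjust it to your firewall tooling and keep 8472/udp restricted to the cluster's own nodes:

```
sudo firewall-cmd --permanent --add-port=6443/tcp    # Kubernetes API
sudo firewall-cmd --permanent --add-port=8472/udp    # Flannel VXLAN (cluster-internal traffic only)
sudo firewall-cmd --permanent --add-port=10250/tcp   # kubelet / metrics server
sudo firewall-cmd --reload
```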
- -The following tables break down the port requirements for inbound and outbound traffic: - -**Inbound Rules for Rancher Server Nodes** - -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 | server nodes, agent nodes, hosted/imported Kubernetes, any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl | -| TCP | 6443 | K3s server nodes | Kubernetes API | -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. | -| TCP | 10250 | K3s server and agent nodes | kubelet | - -
**Outbound Rules for Rancher Nodes**
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RKE - -{{% accordion label="Click to expand" %}} - -Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. - -The following tables break down the port requirements for traffic between the Rancher nodes: - -
**Rules for traffic between Rancher nodes**
- -| Protocol | Port | Description | -|-----|-----|----------------| -| TCP | 443 | Rancher agents | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| TCP | 6443 | Kubernetes apiserver | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 10250 | kubelet | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | - -The following tables break down the port requirements for inbound and outbound traffic: - -
**Inbound Rules for Rancher Nodes** - -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | -| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | -| TCP | 443 | Load Balancer/Reverse Proxy, IPs of all cluster nodes and other API/UI clients | HTTPS traffic to Rancher UI/API | -| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | -
**Outbound Rules for Rancher Nodes**
- -| Protocol | Port | Destination | Description | -|-----|-----|----------------|---| -| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | -| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | -| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | -| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RancherD or RKE2 - -{{% accordion label="Click to expand" %}} - -The RancherD (or RKE2) server needs port 6443 and 9345 to be accessible by other nodes in the cluster. - -All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -**Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -
**Inbound Rules for RancherD or RKE2 Server Nodes**
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 9345 | RancherD/RKE2 agent nodes | Kubernetes API | -| TCP | 6443 | RancherD/RKE2 agent nodes | Kubernetes API | -| UDP | 8472 | RancherD/RKE2 server and agent nodes | Required only for Flannel VXLAN | -| TCP | 10250 | RancherD/RKE2 server and agent nodes | kubelet | -| TCP | 2379 | RancherD/RKE2 server nodes | etcd client port | -| TCP | 2380 | RancherD/RKE2 server nodes | etcd peer port | -| TCP | 30000-32767 | RancherD/RKE2 server and agent nodes | NodePort port range | -| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| HTTPS | 8443 | hosted/imported Kubernetes, any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. | - -Typically all outbound traffic is allowed. -{{% /accordion %}} - -### Ports for Rancher Server in Docker - -{{% accordion label="Click to expand" %}} - -The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: - -
**Inbound Rules for Rancher Node**
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 | hosted/imported Kubernetes, any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl | - -
**Outbound Rules for Rancher Node**
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -### Ports for Rancher Server in GCP GKE - -When deploying Rancher into a Google Kubernetes Engine [private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters), the nodes where Rancher runs must be accessible from the control plane: - -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 9443 | The GKE master `/28` range | Rancher webhooks | - -# Downstream Kubernetes Cluster Nodes - -Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. - -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.x/en/cluster-provisioning/). - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). - -
**Port Requirements for the Rancher Management Plane**
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - ->**Tip:** -> ->If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. - -### Ports for Rancher Launched Kubernetes Clusters using Node Pools - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). - ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -{{% /accordion %}} - -### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -{{% /accordion %}} - -### Ports for Hosted Kubernetes Clusters - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - -### Ports for Registered Clusters - -Note: Registered clusters were called imported clusters before Rancher v2.5. - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - - -# Other Port Considerations - -### Commonly Used Ports - -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -{{% include file="/rancher/v2.x/en/installation/requirements/ports/common-ports-table" %}} - ----- - -### Local Node Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. -- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. - -### Rancher AWS EC2 Security Group - -When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | - -### Opening SUSE Linux Ports - -SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, - -1. SSH into the instance. -1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: - ``` - FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" - FW_SERVICES_EXT_UDP="8472 30000:32767" - FW_ROUTE=yes - ``` -1. Restart the firewall with the new ports: - ``` - SuSEfirewall2 - ``` - -**Result:** The node has the open ports required to be added to a custom cluster. diff --git a/content/rancher/v2.x/en/installation/requirements/ports/common-ports-table/index.md b/content/rancher/v2.x/en/installation/requirements/ports/common-ports-table/index.md deleted file mode 100644 index 86bb7177b..000000000 --- a/content/rancher/v2.x/en/installation/requirements/ports/common-ports-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Protocol | Port | Description | -|:--------: |:----------------: |---------------------------------------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 179 | Calico BGP Port | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 8443 | Rancher webhook | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | -| TCP | 9443 | Rancher webhook | -| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | kubelet API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | diff --git a/content/rancher/v2.x/en/installation/resources/_index.md b/content/rancher/v2.x/en/installation/resources/_index.md deleted file mode 100644 index 4e3ebe858..000000000 --- a/content/rancher/v2.x/en/installation/resources/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Resources -weight: 5 -aliases: -- /rancher/v2.x/en/installation/options ---- - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Advanced Options - -When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: - -| Advanced Option | Available as of | -| ----------------------------------------------------------------------------------------------------------------------- | --------------- | -| [Custom CA Certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/) | v2.0.0 | -| [API Audit Log]({{}}/rancher/v2.x/en/installation/options/api-audit-log/) | v2.0.0 | -| [TLS Settings]({{}}/rancher/v2.x/en/installation/options/tls-settings/) | v2.1.7 | -| [etcd configuration]({{}}/rancher/v2.x/en/installation/options/etcd/) | v2.2.0 | -| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.x/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/content/rancher/v2.x/en/installation/resources/advanced/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/_index.md deleted file mode 100644 index f5e421955..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Advanced -weight: 1000 ---- - -The documents in this section contain resources for less common use cases. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/_index.md deleted file mode 100644 index 720f5d24b..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Installing Rancher in an Air Gapped Environment with Helm 2 -weight: 2 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/ - - /rancher/v2.x/en/installation/air-gap-high-availability/ - - /rancher/v2.x/en/installation/air-gap-single-node/ - - /rancher/v2.x/en/installation/options/air-gap-helm2 ---- - -> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. -> -> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. 
-> -> This section provides a copy of the older instructions for installing Rancher on a Kubernetes cluster using Helm 2 in an air gap environment, and it is intended to be used if upgrading to Helm 3 is not feasible. - -This section is about installations of Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -Throughout the installation instructions, there will be _tabs_ for either a high availability Kubernetes installation or a single-node Docker installation. - -### Air Gapped Kubernetes Installations - -This section covers how to install Rancher on a Kubernetes cluster in an air gapped environment. - -A Kubernetes installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -### Air Gapped Docker Installations - -These instructions also cover how to install Rancher on a single node in an air gapped environment. - -The Docker installation is for Rancher users who want to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -# Installation Outline - -- [1. Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) -- [2. Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) -- [3. Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -- [4. Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md deleted file mode 100644 index b0d43bfeb..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md +++ /dev/null @@ -1,334 +0,0 @@ ---- -title: 4. 
Install Rancher -weight: 400 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/install-rancher/ - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/ - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.x/en/installation/air-gap-single-node/install-rancher - - /rancher/v2.x/en/installation/air-gap/install-rancher - - /rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher ---- - -This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes Installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher in five parts: - -- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) -- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) -- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) -- [D. Install Rancher](#d-install-rancher) -- [E. For Rancher versions before v2.3.0, Configure System Charts](#e-for-rancher-versions-before-v2-3-0-configure-system-charts) - -### A. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - ```plain - helm init -c - ``` - -2. Use the `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/resources/choosing-version/). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. -```plain -helm fetch rancher-/rancher -``` - -> Want additional options? See the Rancher [Helm chart options]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options). - -### B. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). 
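If you choose the `ingress.tls.source=secret` option described below, the certificate files are published to the cluster as Kubernetes secrets once the `cattle-system` namespace exists. A minimal sketch, assuming PEM files named `tls.crt`, `tls.key` and, for a private CA, `cacerts.pem`; the secret names follow the defaults expected by the Rancher chart:

```
kubectl -n cattle-system create secret tls tls-rancher-ingress \
  --cert=tls.crt \
  --key=tls.key

# Only needed when the certificate is signed by a private CA
kubectl -n cattle-system create secret generic tls-ca \
  --from-file=cacerts.pem=./cacerts.pem
```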
- -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -### C. Render the Rancher Helm Template - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -1. From a system connected to the internet, add the cert-manager repo to Helm. - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.14.2 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - ```plain - helm template ./cert-manager-v0.14.2.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml - ``` -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. 
- - ```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} - -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -{{% /accordion %}} - -### D. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -{{% accordion id="install-cert-manager" label="Self-Signed Certificate Installs - Install Cert-manager" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). 
-```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - -> **Important:** -> If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your kubectl apply command above, or else you will receive a validation error relating to the x-kubernetes-preserve-unknown-fields field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -Install Rancher: - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` - -**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. - -### E. For Rancher versions before v2.3.0, Configure System Charts - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in GitHub, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/). - -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/options/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users who want to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you log in or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. 
_Available as of v2.3.0_ | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/options/chart-options/#additional-trusted-cas). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). - -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.x/en/installation/options/local-system-charts/) - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. 
| -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -If you are installing Rancher v2.3.0+, the installation is complete. - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/). - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md deleted file mode 100644 index 8b8d2a8cb..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: '3. 
Install Kubernetes with RKE (Kubernetes Installs Only)' -weight: 300 -aliases: - - /rancher/v2.x/en/installation/air-gap-high-availability/install-kube - - /rancher/v2.x/en/installation/options/air-gap-helm2/launch-kubernetes ---- - -This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. - -Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE]({{}}/rke/latest/en/installation/) and create a RKE config file. - -- [A. Create an RKE Config File](#a-create-an-rke-config-file) -- [B. Run RKE](#b-run-rke) -- [C. Save Your Files](#c-save-your-files) - -### A. Create an RKE Config File - -From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts) you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -
**RKE Options**
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gap network. | -| `user` | ✓ | A user that can run docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### B. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### C. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md deleted file mode 100644 index 3b52765be..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/ - - /rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/ - - /rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/ - - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry ---- - -> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. -> -> **Note:** Populating the private registry with images is the same process for HA and Docker installations, the differences in this section is based on whether or not you are planning to provision a Windows cluster or not. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.x/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed for a Windows cluster. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -A. Find the required assets for your Rancher version
-B. Collect all the required images
-C. Save the images to your workstation
-D. Populate the private registry - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets*.* - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### B. Collect all the required images (For Kubernetes Installs using Rancher Generated Self-Signed Certificate) - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### C. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### D. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. 
Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -_Available as of v2.3.0_ - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -### Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -A. Find the required assets for your Rancher version
-B. Save the images to your Windows Server workstation
-C. Prepare the Docker daemon
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Windows Images into the Private Registry"%}} - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|------------------------|-------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - -### B. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - - ```plain - ./rancher-save-images.ps1 - ``` - - **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - -### C. Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... - "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - -### D. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -{{% /accordion %}} - -### Linux Steps - -The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. - -A. 
Find the required assets for your Rancher version
-B. Collect all the required images
-C. Save the images to your Linux workstation
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Linux Images into the Private Registry" %}} - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -|----------------------------|------| -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### B. Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - - 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - - 2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### C. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. 
Check that the output is in the directory. - -### D. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. The `rancher-images.txt` / `rancher-windows-images.txt` image list is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. - -1. Log into your private registry if required: - ```plain - docker login - ``` - -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry - ``` - -{{% /accordion %}} - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next: Docker Installs - Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md deleted file mode 100644 index e6fd8736c..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: '1. Prepare your Node(s)' -weight: 100 -aliases: - - /rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts - - /rancher/v2.x/en/installation/air-gap-single-node/provision-host - - /rancher/v2.x/en/installation/options/air-gap-helm2/prepare-nodes ---- - -This section is about how to prepare your node(s) to install Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -# Prerequisites - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -### OS, Docker, Hardware, and Networking - -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -### Private Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). - -### CLI Tools - -The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. 
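Before moving on, you can quickly confirm the tooling is in place. This is only a sanity check and assumes the three binaries above are already installed and on your `$PATH`:

```plain
# Each command should print a version string without errors.
kubectl version --client
rke --version
helm version
```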
- -{{% /tab %}} -{{% tab "Docker Install" %}} - -### OS, Docker, Hardware, and Networking - -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -### Private Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). -{{% /tab %}} -{{% /tabs %}} - -# Set up Infrastructure - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
- -![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -### A. Provision three air gapped Linux hosts according to our requirements - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -### B. Set up your Load Balancer - -When setting up the Kubernetes cluster that will run the Rancher server components, an Ingress controller pod will be deployed on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. - -You will need to configure a load balancer as a basic Layer 4 TCP forwarder to direct traffic to these ingress controller pods. The exact configuration will vary depending on your environment. - -> **Important:** -> Only use this load balancer (i.e, the `local` cluster Ingress) to load balance the Rancher server. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. - -**Load Balancer Configuration Samples:** - -- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx) -- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation. - -Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -### A. Provision a single, air gapped Linux host according to our Requirements - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). 
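Once the load balancer is configured and the cluster is running, you can verify that both ports are being forwarded. The commands below are only a sketch; `rancher.example.com` is a hypothetical DNS name standing in for the record that resolves to your load balancer:

```plain
# Both checks should report an open port once at least one ingress
# controller pod is running behind the load balancer.
nc -zv rancher.example.com 80
nc -zv rancher.example.com 443
```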
- -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/api-audit-log/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/api-audit-log/_index.md deleted file mode 100644 index 968ac51ea..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/api-audit-log/_index.md +++ /dev/null @@ -1,569 +0,0 @@ ---- -title: Enabling the API Audit Log to Record System Events -weight: 4 -aliases: - - /rancher/v2.x/en/installation/options/api-audit-log/ - - /rancher/v2.x/en/installation/api-auditing ---- - -You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. - -You can enable API Auditing during Rancher installation or upgrade. - -## Enabling API Audit Log - -The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. - -- [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) - -## API Audit Log Options - -The usage below defines rules about what the audit log should record and what data it should include: - -| Parameter | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata. <br/> `2` - Log event metadata and request body. <br/> `3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value. <br/><br/> See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | -| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host. <br/><br/> Usage Example: `AUDIT_LOG_PATH=/my/custom/path/` | -| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. | -| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | -| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | - -<br>
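For reference, on a Docker install these variables are passed to the Rancher server container with `-e` flags, and the log directory can be mounted to the host. The command below is only a minimal sketch; the host path, retention value, and image tag are placeholders to adapt to your environment:

```plain
# Enables metadata-level auditing and shares the audit log directory with the host.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e AUDIT_LEVEL=1 \
  -e AUDIT_LOG_MAXAGE=20 \
  -v /var/log/rancher/auditlog:/var/log/auditlog \
  rancher/rancher:latest
```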
- -### Audit Log Levels - -The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. - -| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | -| --------------------- | ---------------- | ------------ | ----------------- | ------------- | -| `0` | | | | | -| `1` | ✓ | | | | -| `2` | ✓ | ✓ | | | -| `3` | ✓ | ✓ | ✓ | ✓ | - -## Viewing API Audit Logs - -### Docker Install - -Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. - -### Kubernetes Install - -Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. - -The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. - -#### CLI - -```bash -kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log -``` - -#### Rancher Web GUI - -1. From the context menu, select **Cluster: local > System**. -1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. -1. Pick one of the `rancher` pods and select **⋮ > View Logs**. -1. From the **Logs** drop-down, select `rancher-audit-log`. - -#### Shipping the Audit Log - -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) for details. - -## Audit Log Samples - -After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. - -### Metadata Level - -If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. - -```json -{ - "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", - "requestURI": "/v3/schemas", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "GET", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:22:43 +0800" -} -``` - -### Metadata and Request Body Level - -If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. - -The code sample below depicts an API request, with both its metadata header and body. 
- -```json -{ - "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:28:08 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - 
"field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my description", - "volumes": [] - } -} -``` - -### Metadata, Request Body, and Response Body Level - -If you set your `AUDIT_LEVEL` to `3`, Rancher logs: - -- The metadata header and body for every API request. -- The metadata header and body for every API response. - -#### Request - -The code sample below depicts an API request, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": 
"default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my decript", - "volumes": [] - } -} -``` - -#### Response - -The code sample below depicts an API response, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "responseStatus": "200", - "stage": "ResponseComplete", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "responseBody": { - "actionLinks": { - "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", - "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", - "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" - }, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements" - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container" - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "id": "deployment:default:nginx", 
- "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "links": { - "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", - "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" - }, - "name": "nginx", - "namespaceId": "default", - "paused": false, - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - } - } -} -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/arm64-platform/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/arm64-platform/_index.md deleted file mode 100644 index 60f93f657..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/arm64-platform/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Running on ARM64 (Experimental)" -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/arm64-platform ---- - -> **Important:** -> -> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. - -The following options are available when using an ARM64 platform: - -- Running Rancher on ARM64 based node(s) - - Only [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) -- Create custom cluster and adding ARM64 based node(s) - - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{}}/rancher/v2.x/en/faq/networking/cni-providers/#flannel) -- Importing clusters that contain ARM64 based nodes - - Kubernetes cluster version must be 1.12 or higher - -Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) how to configure the cluster options. 
- -The following features are not tested: - -- Monitoring, alerts, notifiers, pipelines and logging -- Launching apps from the catalog diff --git a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md deleted file mode 100644 index 4a233f537..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Template for an RKE Cluster with a Certificate Signed by Recognized CA and a Layer 4 Load Balancer -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Certificate signed by a recognized CA -- Layer 4 load balancer -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: # ssl cert for ingress. If self-signed, must be signed by same CA as cattle server - tls.key: # ssl key for ingress. 
If self-signed, must be signed by same CA as cattle server - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: # FQDN to access cattle server - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - # FQDN to access cattle server - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - args: - - --no-cacerts - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md deleted file mode 100644 index ec05b5bc4..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Template for an RKE Cluster with a Self-signed Certificate and Layer 4 Load Balancer -weight: 2 -aliases: - - /rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-certificate ---- -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Self-signed SSL -- Layer 4 load balancer -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). 
- -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: # ssl cert for ingress. If selfsigned, must be signed by same CA as cattle server - tls.key: # ssl key for ingress. If selfsigned, must be signed by same CA as cattle server - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: # CA cert used to sign cattle server cert and key - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: # FQDN to access cattle server - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - # FQDN to access cattle server - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server -``` \ No newline at end of file diff --git 
a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md deleted file mode 100644 index 593a42473..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Template for an RKE Cluster with a Self-signed Certificate and SSL Termination on Layer 7 Load Balancer -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Layer 7 load balancer with self-signed SSL termination (HTTPS) -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: # CA cert used to sign cattle server cert and key - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl - spec: - rules: - - host: - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: 
rancher/rancher:v2.0.8 - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md deleted file mode 100644 index 917a39af6..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Template for an RKE Cluster with a Recognized CA Certificate and SSL Termination on Layer 7 Load Balancer -weight: 4 -aliases: - - /rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Layer 7 load balancer with SSL termination (HTTPS) -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). 
- -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl - spec: - rules: - - host: - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - args: - - --no-cacerts - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/advanced/etcd/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/etcd/_index.md deleted file mode 100644 index 38fbc6321..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/etcd/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Tuning etcd for Large Installations -weight: 2 -aliases: - - /rancher/v2.x/en/installation/options/etcd ---- - -When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. 
The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. - -The etcd data set is automatically cleaned up on a five-minute interval by Kubernetes. There are situations, such as deployment thrashing, where enough events can be written to etcd and deleted before garbage collection runs, causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. - -### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB - -```yaml -# RKE cluster.yml ---- -services: - etcd: - extra_args: - quota-backend-bytes: 5368709120 -``` - -## Scaling etcd disk performance - -You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.4.0/tuning/#disk) on how to tune the disk priority on the host. - -Additionally, to reduce IO contention on the disks for etcd, you can use a dedicated device for the data and wal directories. Based on etcd best practices, mirroring RAID configurations are unnecessary because etcd replicates data between the nodes in the cluster. You can use striping RAID configurations to increase available IOPS. - -To implement this solution in an RKE cluster, the `/var/lib/etcd/data` and `/var/lib/etcd/wal` directories will need to have disks mounted and formatted on the underlying host. In the `extra_args` directive of the `etcd` service, you must include the `wal_dir` directory. Without specifying the `wal_dir`, the etcd process will try to manipulate the underlying `wal` mount with insufficient permissions. - -```yaml -# RKE cluster.yml ---- -services: - etcd: - extra_args: - data-dir: '/var/lib/rancher/etcd/data/' - wal-dir: '/var/lib/rancher/etcd/wal/wal_dir' - extra_binds: - - '/var/lib/etcd/data:/var/lib/rancher/etcd/data' - - '/var/lib/etcd/wal:/var/lib/rancher/etcd/wal' -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/firewall/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/firewall/_index.md deleted file mode 100644 index 42d2379f0..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/firewall/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Opening Ports with firewalld -weight: 1 ---- - -> We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - -Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. 
- -For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: - -``` -Chain INPUT (policy ACCEPT) -target prot opt source destination -ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED -ACCEPT icmp -- anywhere anywhere -ACCEPT all -- anywhere anywhere -ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain FORWARD (policy ACCEPT) -target prot opt source destination -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain OUTPUT (policy ACCEPT) -target prot opt source destination -``` - -You can check the default firewall rules with this command: - -``` -sudo iptables --list -``` - -This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.x/en/installation/references) for nodes in a high-availability Rancher server cluster. - -# Prerequisite - -Install v7.x or later ofv`firewalld`: - -``` -yum install firewalld -systemctl start firewalld -systemctl enable firewalld -``` - -# Applying Firewall Port Rules - -In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: - -``` -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` -If your Rancher server nodes have separate roles, use the following commands based on the role of the node: - -``` -# For etcd nodes, run the following commands: -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp - -# For control plane nodes, run the following commands: -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp - -# For worker nodes, run the following commands: -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` - -After the `firewall-cmd` commands have been run on a node, 
use the following command to enable the firewall rules: - -``` -firewall-cmd --reload -``` - -**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/_index.md deleted file mode 100644 index b3bdb08f7..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Kubernetes Installation Using Helm 2 -weight: 1 -aliases: - - /rancher/v2.x/en/installation/options/helm2 ---- - -> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. -> -> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. -> -> This section provides a copy of the older high-availability Kubernetes Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. - -This procedure walks you through setting up a 3-node cluster with Rancher Kubernetes Engine (RKE) and installing the Rancher chart with the Helm package manager. - -> **Important:** The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. - -> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -## Recommended Architecture - -- DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
-![High-availability Kubernetes Install]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers - -## Required Tools - -The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -## Installation Outline - -- [Create Nodes and Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/) -- [Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/) -- [Initialize Helm (tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) -- [Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) - -## Additional Install Options - -- [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) - -## Previous Methods - -[RKE add-on install]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/) - -> **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> -> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> -> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md deleted file mode 100644 index 05ccaad00..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "1. Create Nodes and Load Balancer" -weight: 185 -aliases: - - /rancher/v2.x/en/installation/options/helm2/create-nodes-lb ---- - -Use your provider of choice to provision 3 nodes and a Load Balancer endpoint for your RKE install. - -> **Note:** These nodes must be in the same region/datacenter. You may place these servers in separate availability zones. - -### Node Requirements - -View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -View the OS requirements for RKE at [RKE Requirements]({{}}/rke/latest/en/os/) - -### Load Balancer - -RKE will configure an Ingress controller pod, on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. - -Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configuration will vary depending on your environment. - ->**Important:** ->Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. 
Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -#### Examples - -* [Nginx]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx/) -* [Amazon NLB]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb/) - -### [Next: Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md deleted file mode 100644 index 89cd1374e..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: NGINX -weight: 270 -aliases: - - /rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx ---- -NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. - ->**Note:** -> In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. -> -> One caveat: do not use one of your Rancher nodes as the load balancer. - -## Install NGINX - -Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. - -## Create NGINX Configuration - -After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/). - - >**Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. - -
Example NGINX config
- ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - stream { - upstream rancher_servers_http { - least_conn; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - } - server { - listen 80; - proxy_pass rancher_servers_http; - } - - upstream rancher_servers_https { - least_conn; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers_https; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -## Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md deleted file mode 100644 index 70569e316..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Amazon NLB -weight: 277 -aliases: - - /rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb ---- -## Objectives - -Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Groups](#create-target-groups) - - Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target groups. - -3. [Create Your NLB](#create-your-nlb) - - Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - -> **Note:** Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ELB or ALB. - -## Create Target Groups - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX ingress controller on the nodes will make sure that port 80 gets redirected to port 443. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. - -The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. 
-
-{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}}
-
-Click **Create target group** to create the first target group, regarding TCP port 443.
-
-### Target Group (TCP port 443)
-
-Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table.
-
-Option | Setting
---------------------------------------|------------------------------------
-Target Group Name | `rancher-tcp-443`
-Protocol | `TCP`
-Port | `443`
-Target type | `instance`
-VPC | Choose your VPC
-Protocol (Health Check) | `HTTP`
-Path (Health Check) | `/healthz`
-Port (Advanced health check) | `override`,`80`
-Healthy threshold (Advanced health) | `3`
-Unhealthy threshold (Advanced) | `3`
-Timeout (Advanced) | `6 seconds`
-Interval (Advanced) | `10 seconds`
-Success codes | `200-399`
-
-**Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} - -
-**Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} - -
-
-Click **Create target group** to create the second target group, regarding TCP port 80.
-
-### Target Group (TCP port 80)
-
-Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table.
-
-Option | Setting
---------------------------------------|------------------------------------
-Target Group Name | `rancher-tcp-80`
-Protocol | `TCP`
-Port | `80`
-Target type | `instance`
-VPC | Choose your VPC
-Protocol (Health Check) | `HTTP`
-Path (Health Check) | `/healthz`
-Port (Advanced health check) | `traffic port`
-Healthy threshold (Advanced health) | `3`
-Unhealthy threshold (Advanced) | `3`
-Timeout (Advanced) | `6 seconds`
-Interval (Advanced) | `10 seconds`
-Success codes | `200-399`
-
-**Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} - -
-**Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} - -
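If you prefer to script this step instead of clicking through the console, the AWS CLI sketch below creates both target groups with settings matching the tables above. It is illustrative only: the VPC ID is a placeholder, and some advanced health-check fields shown in the console (timeout and success codes) may not be adjustable for Network Load Balancer target groups from the CLI, depending on your AWS CLI version.

```
# Placeholder VPC ID -- substitute your own.
VPC_ID=vpc-0123456789abcdef0

# Target group for TCP port 443, health-checked over HTTP on port 80 at /healthz.
aws elbv2 create-target-group \
  --name rancher-tcp-443 \
  --protocol TCP \
  --port 443 \
  --target-type instance \
  --vpc-id "$VPC_ID" \
  --health-check-protocol HTTP \
  --health-check-path /healthz \
  --health-check-port 80 \
  --healthy-threshold-count 3 \
  --unhealthy-threshold-count 3 \
  --health-check-interval-seconds 10

# Target group for TCP port 80, health-checked on the traffic port.
aws elbv2 create-target-group \
  --name rancher-tcp-80 \
  --protocol TCP \
  --port 80 \
  --target-type instance \
  --vpc-id "$VPC_ID" \
  --health-check-protocol HTTP \
  --health-check-path /healthz \
  --health-check-port traffic-port \
  --healthy-threshold-count 3 \
  --unhealthy-threshold-count 3 \
  --health-check-interval-seconds 10
```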
- -## Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -## Create Your NLB - -Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher` - - Scheme: `internal` or `internet-facing` - - The Scheme that you choose for your NLB is dependent on the configuration of your instances/VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `TCP`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - From the **Name** drop-down, choose `rancher-tcp-443`. - - - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. - -8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -9. After AWS creates the NLB, click **Close**. - -## Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/_index.md deleted file mode 100644 index bb62d0219..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "Initialize Helm: Install the Tiller Service" -description: "With Helm, you can create configurable deployments instead of using static files. In order to use Helm, the Tiller service needs to be installed on your cluster." -weight: 195 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-init ---- - -Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. 
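As a small illustration of what that templating buys you (this fragment is generic and not taken from the Rancher chart), a chart template reads values supplied at install time instead of hard-coding them:

```yaml
# templates/deployment.yaml (illustrative fragment)
# "replicas" comes from the chart's values.yaml or from --set replicas=3 at install time.
spec:
  replicas: {{ .Values.replicas }}
```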
- -For systems without direct internet access, see [Helm - Air Gap]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) for install details. - -Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) - -### Install Tiller on the Cluster - -> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. - -Helm installs the `tiller` service on your cluster to manage charts. Since RKE enables RBAC by default we will need to use `kubectl` to create a `serviceaccount` and `clusterrolebinding` so `tiller` has permission to deploy to the cluster. - -* Create the `ServiceAccount` in the `kube-system` namespace. -* Create the `ClusterRoleBinding` to give the `tiller` account access to the cluster. -* Finally use `helm` to install the `tiller` service - -```plain -kubectl -n kube-system create serviceaccount tiller - -kubectl create clusterrolebinding tiller \ - --clusterrole=cluster-admin \ - --serviceaccount=kube-system:tiller - -helm init --service-account tiller - -# Users in China: You will need to specify a specific tiller-image in order to initialize tiller. -# The list of tiller image tags are available here: https://dev.aliyun.com/detail.html?spm=5176.1972343.2.18.ErFNgC&repoId=62085. -# When initializing tiller, you'll need to pass in --tiller-image - -helm init --service-account tiller \ ---tiller-image registry.cn-hangzhou.aliyuncs.com/google_containers/tiller: -``` - -> **Note:** This`tiller`install has full cluster access, which should be acceptable if the cluster is dedicated to Rancher server. Check out the [helm docs](https://docs.helm.sh/using_helm/#role-based-access-control) for restricting `tiller` access to suit your security requirements. - -### Test your Tiller installation - -Run the following command to verify the installation of `tiller` on your cluster: - -``` -kubectl -n kube-system rollout status deploy/tiller-deploy -Waiting for deployment "tiller-deploy" rollout to finish: 0 of 1 updated replicas are available... -deployment "tiller-deploy" successfully rolled out -``` - -And run the following command to validate Helm can talk to the `tiller` service: - -``` -helm version -Client: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} -Server: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} -``` - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting/) page. 
- -### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md deleted file mode 100644 index 112b87f3c..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Troubleshooting -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting ---- - -### Helm commands show forbidden - -When Helm is initiated in the cluster without specifying the correct `ServiceAccount`, the command `helm init` will succeed but you won't be able to execute most of the other `helm` commands. The following error will be shown: - -``` -Error: configmaps is forbidden: User "system:serviceaccount:kube-system:default" cannot list configmaps in the namespace "kube-system" -``` - -To resolve this, the server component (`tiller`) needs to be removed and added with the correct `ServiceAccount`. You can use `helm reset --force` to remove the `tiller` from the cluster. Please check if it is removed using `helm version --server`. - -``` -helm reset --force -Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster. -helm version --server -Error: could not find tiller -``` - -When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) to install `tiller` with the correct `ServiceAccount`. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/_index.md deleted file mode 100644 index 9382bc959..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/_index.md +++ /dev/null @@ -1,220 +0,0 @@ ---- -title: "4. Install Rancher" -weight: 200 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-rancher ---- - -Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). - -Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) - -### Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/resources/choosing-version). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -There are three recommended options for the source of the certificate. 
-
->**Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination).
-
-| Configuration | Chart option | Description | Requires cert-manager |
-|-----|-----|-----|-----|
-| [Rancher Generated Certificates](#rancher-generated-certificates) | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** | [yes](#optional-install-cert-manager) | -| [Let’s Encrypt](#let-s-encrypt) | `ingress.tls.source=letsEncrypt` | Use [Let's Encrypt](https://letsencrypt.org/) to issue a certificate | [yes](#optional-install-cert-manager) | -| [Certificates from Files](#certificates-from-files) | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s) | no | - -### Optional: Install cert-manager - -**Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). - -> **Important:** -> Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. - -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). - -Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. - -These instructions are adapted from the [official cert-manager documentation](https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html#installing-with-helm). - - -1. Install the CustomResourceDefinition resources separately - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml - ``` - -1. Create the namespace for cert-manager - ```plain - kubectl create namespace cert-manager - ``` - -1. Label the cert-manager namespace to disable resource validation - ```plain - kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true - ``` - -1. Add the Jetstack Helm repository - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - ```plain - helm repo update - ``` - -1. Install the cert-manager Helm chart - ```plain - helm install \ - --name cert-manager \ - --namespace cert-manager \ - --version v0.14.2 \ - jetstack/cert-manager - ``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m -cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m -cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m -``` - -If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check the [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. - -
- -#### Rancher Generated Certificates - -> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -#### Let's Encrypt - -> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. This configuration uses HTTP validation (`HTTP-01`) so the load balancer must have a public DNS record and be accessible from the internet. - -- Set `hostname` to the public DNS record, set `ingress.tls.source` to `letsEncrypt` and `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices) -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -#### Certificates from Files - -Create Kubernetes secrets from your own certificates for Rancher to use. - - -> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set `hostname` and set `ingress.tls.source` to `secret`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. 
- -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### Advanced Configurations - -The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. - -* [HTTP Proxy]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/) -* [Private Docker Image Registry]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -* [TLS Termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/) for the full list of options. - -### Save your options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. - -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting/) Page diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md deleted file mode 100644 index bef4697b4..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: Chart Options -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options ---- - -### Common Options - -| Option | Default Value | Description | -| --- | --- | --- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. 
- "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
- -### Advanced Options - -| Option | Default Value | Description | -| --- | --- | --- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "auto" | `string` - Have Rancher detect and import the local Rancher server cluster | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | -| `debug` | false | `bool` - set debug flag on rancher server | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | -| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | -| `resources` | {} | `map` - rancher pod resource requests & limits | -| `rancherImage` | "rancher/rancher" | `string` - rancher image source | -| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | -| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | -| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ _Available as of v2.3.0_ | -| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. _Available as of v2.3.0_ - -
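To show how several of these options are combined in practice, here is an illustrative Helm 2 install command that enables the API audit log, skips importing the `local` cluster, and sets an HTTP proxy. The repository name (`rancher-latest`), hostname, and proxy URL are placeholders; substitute your own values.

```
helm install rancher-latest/rancher \
  --name rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set auditLog.level=1 \
  --set addLocal="false" \
  --set proxy="http://proxy.example.com:8080"
```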
- -### API Audit Log - -Enabling the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing/). - -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. - -```plain ---set auditLog.level=1 -``` - -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. - -Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. - -### Setting Extra Environment Variables - -_Available as of v2.2.0_ - -You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -### TLS settings - -_Available as of v2.2.0_ - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. - -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -> Note: This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair -``` - -_Available as of v2.0.15, v2.1.10 and v2.2.4_ - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Service cluster IP range (default: 10.43.0.1/16) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. 
- -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. - -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) - - -### External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. - -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/) to add the CA cert for Rancher. - -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -#### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -#### Required Headers - -* `Host` -* `X-Forwarded-Proto` -* `X-Forwarded-Port` -* `X-Forwarded-For` - -#### Recommended Timeouts - -* Read Timeout: `1800 seconds` -* Write Timeout: `1800 seconds` -* Connect Timeout: `30 seconds` - -#### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - - -#### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - - >**Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -* Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -* Replace both occurrences of `FQDN` to the DNS name for Rancher. 
-* Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md deleted file mode 100644 index 25465c243..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Adding Kubernetes TLS Secrets -description: Read about how to populate the Kubernetes TLS secret for a Rancher installation -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets ---- - -Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. - -Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. - -Use `kubectl` with the `tls` secret type to create the secrets. - -``` -kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. - -### Using a Private CA Signed Certificate - -If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. - -Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. - ->**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA certificate. 
- -``` -kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md deleted file mode 100644 index 561f457d3..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Troubleshooting -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting ---- - -### Where is everything - -Most of the troubleshooting will be done on objects in these 3 namespaces. - -* `cattle-system` - `rancher` deployment and pods. -* `ingress-nginx` - Ingress controller pods and services. -* `kube-system` - `tiller` and `cert-manager` pods. - -### "default backend - 404" - -A number of things can cause the ingress-controller not to forward traffic to your rancher instance. Most of the time its due to a bad ssl configuration. - -Things to check - -* [Is Rancher Running](#is-rancher-running) -* [Cert CN is "Kubernetes Ingress Controller Fake Certificate"](#cert-cn-is-kubernetes-ingress-controller-fake-certificate) - -### Is Rancher Running - -Use `kubectl` to check the `cattle-system` system namespace and see if the Rancher pods are in a Running state. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -If the state is not `Running`, run a `describe` on the pod and check the Events. - -``` -kubectl -n cattle-system describe pod - -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 11m default-scheduler Successfully assigned rancher-784d94f59b-vgqzh to localhost - Normal SuccessfulMountVolume 11m kubelet, localhost MountVolume.SetUp succeeded for volume "rancher-token-dj4mt" - Normal Pulling 11m kubelet, localhost pulling image "rancher/rancher:v2.0.4" - Normal Pulled 11m kubelet, localhost Successfully pulled image "rancher/rancher:v2.0.4" - Normal Created 11m kubelet, localhost Created container - Normal Started 11m kubelet, localhost Started container -``` - -### Checking the rancher logs - -Use `kubectl` to list the pods. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -Use `kubectl` and the pod name to list the logs from the pod. - -``` -kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh -``` - -### Cert CN is "Kubernetes Ingress Controller Fake Certificate" - -Use your browser to check the certificate details. If it says the Common Name is "Kubernetes Ingress Controller Fake Certificate", something may have gone wrong with reading or issuing your SSL cert. - -> **Note:** if you are using LetsEncrypt to issue certs it can sometimes take a few minuets to issue the cert. - -#### cert-manager issued certs (Rancher Generated or LetsEncrypt) - -`cert-manager` has 3 parts. - -* `cert-manager` pod in the `kube-system` namespace. -* `Issuer` object in the `cattle-system` namespace. -* `Certificate` object in the `cattle-system` namespace. - -Work backwards and do a `kubectl describe` on each object and check the events. You can track down what might be missing. - -For example there is a problem with the Issuer: - -``` -kubectl -n cattle-system describe certificate -... 
-Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning IssuerNotReady 18s (x23 over 19m) cert-manager Issuer rancher not ready -``` - -``` -kubectl -n cattle-system describe issuer -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning ErrInitIssuer 19m (x12 over 19m) cert-manager Error initializing issuer: secret "tls-rancher" not found - Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found -``` - -#### Bring Your Own SSL Certs - -Your certs get applied directly to the Ingress object in the `cattle-system` namespace. - -Check the status of the Ingress object and see if its ready. - -``` -kubectl -n cattle-system describe ingress -``` - -If its ready and the SSL is still not working you may have a malformed cert or secret. - -Check the nginx-ingress-controller logs. Because the nginx-ingress-controller has multiple containers in its pod you will need to specify the name of the container. - -``` -kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller -... -W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found -``` - -### no matches for kind "Issuer" - -The SSL configuration option you have chosen requires cert-manager to be installed before installing Rancher or else the following error is shown: - -``` -Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1" -``` - -Install cert-manager and try installing Rancher again. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md deleted file mode 100644 index c081ab688..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: "2. Install Kubernetes with RKE" -weight: 190 -aliases: - - /rancher/v2.x/en/installation/options/helm2/kubernetes-rke ---- - -Use RKE to install Kubernetes with a high availability etcd configuration. - ->**Note:** For systems without direct internet access see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) for install details. - -### Create the `rancher-cluster.yml` File - -Using the sample below create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -> **Note:** If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
- - -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane,worker,etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h -``` - -#### Common RKE Nodes Options - -| Option | Required | Description | -| --- | --- | --- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -#### Advanced Configurations - -RKE has many configuration options for customizing the install to suit your specific environment. - -Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. - -For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{}}/rancher/v2.x/en/installation/options/etcd/). - -### Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### Testing Your Cluster - -RKE should have created a file `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`. - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state. - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy are ready to continue. - -* Pods are in `Running` or `Completed` state. -* `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -* Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
- -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -### Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/troubleshooting/) page. - -### [Next: Initialize Helm (Install tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md deleted file mode 100644 index 741eecd1d..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Troubleshooting -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/kubernetes-rke/troubleshooting ---- - -### canal Pods show READY 2/3 - -The most common cause of this issue is port 8472/UDP is not open between the nodes. Check your local firewall, network routing or security groups. - -Once the network issue is resolved, the `canal` pods should timeout and restart to establish their connections. - -### nginx-ingress-controller Pods show RESTARTS - -The most common cause of this issue is the `canal` pods have failed to establish the overlay network. See [canal Pods show READY `2/3`](#canal-pods-show-ready-2-3) for troubleshooting. - -### Failed to set up SSH tunneling for host [xxx.xxx.xxx.xxx]: Can't retrieve Docker Info - -#### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed) - -* User specified to connect with does not have permission to access the Docker socket. This can be checked by logging into the host and running the command `docker ps`: - -``` -$ ssh user@server -user@server$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: -``` -$ nc xxx.xxx.xxx.xxx 22 -SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 -``` - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found - -* The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file. - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain - -* The key file specified as `ssh_key_path` is not correct for accessing the node. 
Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys - -* If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. If the `SSH_AUTH_SOCK` environment variable is found in the environment where the `rke` command is run, it will be used automatically to connect to the node. - -#### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? - -* The node is not reachable on the configured `address` and `port`. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/_index.md deleted file mode 100644 index 2bce2c021..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: RKE Add-On Install -weight: 276 -aliases: - - /rancher/v2.x/en/installation/options/helm2/rke-add-on ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - - -* [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb) -* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb) -* [HTTP Proxy Configuration for a Kubernetes installation]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy/) -* [Troubleshooting RKE Add-on Installs]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md deleted file mode 100644 index d40575bae..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Enable API Auditing -weight: 300 -aliases: - - /rke/latest/en/config-options/add-ons/api-auditing/ - - /rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing ---- - ->**Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. 
API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. - -## In-line Arguments - -Enable API Auditing using RKE by adding arguments to your Rancher container. - -To enable API auditing: - -- Add API Auditing arguments (`args`) to your Rancher container. -- Declare a `mountPath` in the `volumeMounts` directive of the container. -- Declare a `path` in the `volumes` directive. - -For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing). - -```yaml -... -containers: - - image: rancher/rancher:latest - imagePullPolicy: Always - name: cattle-server - args: ["--audit-log-path", "/var/log/auditlog/rancher-api-audit.log", "--audit-log-maxbackup", "5", "--audit-log-maxsize", "50", "--audit-level", "2"] - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - - mountPath: /var/log/auditlog - name: audit-log-dir - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server - - name: audit-log-dir - hostPath: - path: /var/log/rancher/auditlog - type: Directory -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md deleted file mode 100644 index 7fe318649..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md +++ /dev/null @@ -1,400 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (TCP/Layer 4) -weight: 275 -aliases: - - /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 4 load balancer (TCP) -- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) - -In a Kubernetes setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. - -Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers -![High-availability Kubernetes installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. 
Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -
- -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -## 2. Configure Load Balancer - -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](./nlb) - ->**Note:** -> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. -> ->One caveat: do not use one of your Rancher nodes as the load balancer. - -### A. Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. - -### B. Create NGINX Configuration - -After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). - - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - - **Example NGINX config:** - ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - http { - server { - listen 80; - return 301 https://$host$request_uri; - } - } - - stream { - upstream rancher_servers { - least_conn; - server IP_NODE_1:443 max_fails=3 fail_timeout=5s; - server IP_NODE_2:443 max_fails=3 fail_timeout=5s; - server IP_NODE_3:443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -### Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
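If `dig` is available on your workstation, it offers a more compact check than `nslookup`. This is only an illustrative sketch; `rancher.yourdomain.com` stands in for your own FQDN.

```
# Print just the address the A record resolves to.
dig +short rancher.yourdomain.com A

# Expected output: the IP address of your load balancer, for example:
# 203.0.113.10
```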
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of the following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate.yml) - - [Template for certificate signed by recognized CA
`3-node-certificate-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate-recognizedca.yml) - - >**Advanced Config Options:** - > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} - ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -1. 
In `kind: Secret` with `name: cattle-keys-ingress`: - - * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) - * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - - >**Note:** - > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== - ``` - -2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). - - >**Note:** - > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
- - - **Step Result:** The file should look like the example below (the base64 encoded string should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - ``` - -{{% /accordion %}} - -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} - -If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. - -In the `kind: Secret` with `name: cattle-keys-ingress`: - -* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) -* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - ->**Note:** -> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: cattle-keys-ingress - namespace: cattle-system -type: Opaque -data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTB
PelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== -``` - -{{% /accordion %}} - - - -## 8. Configure FQDN - -There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). - -In the `kind: Ingress` with `name: cattle-ingress-http`: - -* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). - -After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): - -```yaml - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - rancher.yourdomain.com -``` - -Save the `.yml` file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - -``` -rke up --config rancher-cluster.yml -``` - -**Step Result:** The output should be similar to the snippet below: - -``` -INFO[0000] Building Kubernetes cluster -INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] -INFO[0000] [network] Deploying port listener containers -INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] -... -INFO[0101] Finished building Kubernetes cluster successfully -``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? 
- -You have a couple of options: - -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md deleted file mode 100644 index 40ce11c17..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Amazon NLB Configuration -weight: 277 -aliases: -- /rancher/v2.x/en/installation/ha-server-install/nlb/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Objectives - -Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Groups](#create-target-groups) - - Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target groups. - -3. [Create Your NLB](#create-your-nlb) - - Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - - -## Create Target Groups - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX controller on the nodes will make sure that port 80 gets redirected to port 443. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. - -The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. - -{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} - -Click **Create target group** to create the first target group, regarding TCP port 443. - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-443` -Protocol | `TCP` -Port | `443` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `override`,`80` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
-**Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} - -
-**Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} - -
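Once the cluster from the main installation procedure is up, you can confirm from any machine that a node answers the health check this target group performs. A quick sketch, with `IP_NODE_1` as a placeholder for one of your Linux node addresses:

```
# Request the health check path the target group uses (HTTP on port 80).
curl -s -o /dev/null -w "%{http_code}\n" http://IP_NODE_1/healthz

# Any status in the 200-399 range matches the configured success codes.
```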
- -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-80` -Protocol | `TCP` -Port | `80` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `traffic port` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
-**Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} - -
-**Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} - -
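As described above, the port 80 listener only exists so that the NGINX controller on the nodes can redirect plain HTTP to port 443. Once the cluster is running, you can spot-check that behavior directly against a node; `IP_NODE_1` and `rancher.yourdomain.com` are placeholders in this sketch.

```
# Expect a 301/302 redirect to the HTTPS URL from the ingress controller.
curl -sI -H "Host: rancher.yourdomain.com" http://IP_NODE_1/ | head -n 5
```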
- -## Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -## Create Your NLB - -Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher` - - Scheme: `internet-facing` - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `TCP`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - From the **Name** drop-down, choose `rancher-tcp-443`. - - - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. - -8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -9. After AWS creates the NLB, click **Close**. - -## Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md deleted file mode 100644 index 8a8cd34c5..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) -weight: 276 -aliases: -- /rancher/v2.x/en/installation/ha-server-install-external-lb/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. 
The setup is based on: - -- Layer 7 Loadbalancer with SSL termination (HTTPS) -- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) - -In an Kubernetes setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. - -Kubernetes Rancher install with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -## 2. Configure Load Balancer - -When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. - -The load balancer has to be configured to support the following: - -* **WebSocket** connections -* **SPDY** / **HTTP/2** protocols -* Passing / setting the following headers: - -| Header | Value | Description | -|---------------------|----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. | -| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | -| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the port that the client used to connect to the load balancer. | -| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. | - -Health checks can be executed on the `/healthz` endpoint of the node, which returns HTTP 200. - -We have example configurations for the following load balancers: - -* [Amazon ALB configuration](alb/) -* [NGINX configuration](nginx/) - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a YAML config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of the following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-certificate.yml) - - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-recognizedca.yml) - - >**Advanced Config Options:** - > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) - ->**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} -If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. -{{% /accordion %}} - -## 8. Configure FQDN - -There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). - -1. Open `rancher-cluster.yml`. - -2. In the `kind: Ingress` with `name: cattle-ingress-http:` - - Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - ``` - - -3. Save the file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. 
The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - - ``` - rke up --config rancher-cluster.yml - ``` - - **Step Result:** The output should be similar to the snippet below: - - ``` - INFO[0000] Building Kubernetes cluster - INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] - INFO[0000] [network] Deploying port listener containers - INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] - ... - INFO[0101] Finished building Kubernetes cluster successfully - ``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? - -- **Recommended:** Review [Creating Backups—High Availability Back Up and Restore]({{}}/rancher/v2.x/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.x/en/tasks/clusters/creating-a-cluster/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md deleted file mode 100644 index 0a6dd56b5..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Amazon ALB Configuration -weight: 277 -aliases: -- /rancher/v2.x/en/installation/ha-server-install-external-lb/alb/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/alb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Objectives - -Configuring an Amazon ALB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Group](#create-target-group) - - Begin by creating one target group for the http protocol. You'll add your Linux nodes to this group. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target group. - -3. [Create Your ALB](#create-your-alb) - - Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - - -## Create Target Group - -Your first ALB configuration step is to create one target group for HTTP. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. - -The document below will guide you through this process. Use the data in the tables below to complete the procedure. - -[Amazon Documentation: Create a Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-target-group.html) - -### Target Group (HTTP) - -Option | Setting -----------------------------|------------------------------------ -Target Group Name | `rancher-http-80` -Protocol | `HTTP` -Port | `80` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` - -## Register Targets - -Next, add your Linux nodes to your target group. - -[Amazon Documentation: Register Targets with Your Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-register-targets.html) - -### Create Your ALB - -Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target group you created in [Create Target Group](#create-target-group). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Application Load Balancer**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher-http` - - Scheme: `internet-facing` - - IP address type: `ipv4` - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `HTTP`: `80` - - `HTTPS`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Security Settings** form. - - Configure the certificate you want to use for SSL termination. - -7. Complete the **Step 3: Configure Security Groups** form. - -8. Complete the **Step 4: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - Add target group `rancher-http-80`. - -9. Complete **Step 5: Register Targets**. Since you registered your targets earlier, all you have to do it click **Next: Review**. - -10. Complete **Step 6: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -11. After AWS creates the ALB, click **Close**. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md deleted file mode 100644 index 29fdec03c..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: NGINX Configuration -weight: 277 -aliases: -- /rancher/v2.x/en/installation/ha-server-install-external-lb/nginx/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. - -For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -## Create NGINX Configuration - -See [Example NGINX config]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#example-nginx-config). 
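Before reloading NGINX in the next step, it can be worth checking the configuration for syntax errors. This is a quick sanity check and assumes NGINX was installed from the official packages and reads its configuration from the default location:

```
# Parse the NGINX configuration and report syntax errors without applying it
nginx -t
```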
- -## Run NGINX - -* Reload or restart NGINX - - ```` - # Reload NGINX - nginx -s reload - - # Restart NGINX - # Depending on your Linux distribution - service nginx restart - systemctl restart nginx - ```` - -## Browse to Rancher UI - -You should now be to able to browse to `https://FQDN`. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md deleted file mode 100644 index 1a1f390e7..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: HTTP Proxy Configuration -weight: 277 -aliases: - - /rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. - -Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. - -Environment variable | Purpose ---------------------------|--------- -HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) -HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) -NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) - -> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Kubernetes installation - -When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. - -* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{}}/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/#5-download-rke-config-file-template) -* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{}}/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/#5-download-rke-config-file-template) - -The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. Required values for `NO_PROXY` are: - -* `localhost` -* `127.0.0.1` -* `0.0.0.0` -* Configured `service_cluster_ip_range` (default: `10.43.0.0/16`) - -The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage of the proxy when accessing network range `192.168.10.0/24`, the configured `service_cluster_ip_range` (`10.43.0.0/16`) and every hostname under the domain `example.com`. If you have changed the `service_cluster_ip_range`, you have to update the value below accordingly. - -```yaml -... 
---- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:latest - imagePullPolicy: Always - name: cattle-server - env: - - name: HTTP_PROXY - value: "http://192.168.10.1:3128" - - name: HTTPS_PROXY - value: "http://192.168.10.1:3128" - - name: NO_PROXY - value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,192.168.10.0/24,example.com" - ports: -... -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md deleted file mode 100644 index 6727ea641..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: 404 - default backend -weight: 30 -aliases: -- /rancher/v2.x/en/installation/troubleshooting-ha/404-default-backend/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend -- /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. - -When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. - -### Possible causes - -The nginx ingress controller is not able to serve the configured host in `rancher-cluster.yml`. This should be the FQDN you configured to access Rancher. You can check if it is properly configured by viewing the ingress that is created by running the following command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress -n cattle-system -o wide -``` - -Check if the `HOSTS` column is displaying the FQDN you configured in the template, and that the used nodes are listed in the `ADDRESS` column. If that is configured correctly, we can check the logging of the nginx ingress controller. - -The logging of the nginx ingress controller will show why it cannot serve the requested host. To view the logs, you can run the following command - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx -``` - -Errors - -* `x509: certificate is valid for fqdn, not your_configured_fqdn` - -The used certificates do not contain the correct hostname. Generate new certificates that contain the chosen FQDN to access Rancher and redeploy. - -* `Port 80 is already in use. 
Please check the flag --http-port` - -There is a process on the node occupying port 80, this port is needed for the nginx ingress controller to route requests to Rancher. You can find the process by running the command: `netstat -plant | grep \:80`. - -Stop/kill the process and redeploy. - -* `unexpected error creating pem file: no valid PEM formatted block found` - -The base64 encoded string configured in the template is not valid. Please check if you can decode the configured string using `base64 -D STRING`, this should return the same output as the content of the file you used to generate the string. If this is correct, please check if the base64 encoded string is placed directly after the key, without any newlines before, in between or after. (For example: `tls.crt: LS01..`) diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md deleted file mode 100644 index d9041182e..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Troubleshooting HA RKE Add-On Install -weight: 370 -aliases: -- /rancher/v2.x/en/installation/troubleshooting-ha/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This section contains common errors seen when setting up a Kubernetes installation. - -Choose from the following options: - -- [Generic troubleshooting](generic-troubleshooting/) - - In this section, you can find generic ways to debug your Kubernetes cluster. - -- [Failed to set up SSH tunneling for host]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) - - In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. - -- [Failed to get job complete status](./job-complete-status/) - - In this section, you can find errors related to deploying addons. - -- [404 - default backend]({{}}/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/) - - In this section, you can find errors related to the `404 - default backend` page that is shown when trying to access Rancher. 
diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md deleted file mode 100644 index e9807e680..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Generic troubleshooting -weight: 5 -aliases: -- /rancher/v2.x/en/installation/troubleshooting-ha/generic-troubleshooting/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Below are steps that you can follow to determine what is wrong in your cluster. - -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. - -### All nodes should be present and in **Ready** state - -To check, run the command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes -``` - -If a node is not shown in this output or a node is not in **Ready** state, you can check the logging of the `kubelet` container. Login to the node and run `docker logs kubelet`. - -### All pods/jobs should be in **Running**/**Completed** state - -To check, run the command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -#### Describe pod - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml describe pod POD_NAME -n NAMESPACE -``` - -#### Pod container logs - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -#### Describe job - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml describe job JOB_NAME -n NAMESPACE -``` - -#### Logs from the containers of pods of the job - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Check ingress - -Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (address(es) it will be routed to). 
- -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress --all-namespaces -``` - -### List all Kubernetes cluster events - -Kubernetes cluster events are stored, and can be retrieved by running: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get events --all-namespaces -``` - -### Check Rancher container logging - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=cattle -n cattle-system -``` - -### Check NGINX ingress controller logging - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx -``` - -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. This will run an `alpine` container on every host, which we will use to run a `ping` test between containers on all hosts. - -1. Save the following file as `ds-alpine.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: alpine - spec: - selector: - matchLabels: - name: alpine - template: - metadata: - labels: - name: alpine - spec: - tolerations: - - effect: NoExecute - key: "node-role.kubernetes.io/etcd" - value: "true" - - effect: NoSchedule - key: "node-role.kubernetes.io/controlplane" - value: "true" - containers: - - image: alpine - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl --kubeconfig kube_config_rancher-cluster.yml create -f ds-alpine.yml` -3. Wait until `kubectl --kubeconfig kube_config_rancher-cluster.yml rollout status ds/alpine -w` returns: `daemon set "alpine" successfully rolled out`. -4. Run the following command to let each container on every host ping each other (it's a single line command). - - ``` - echo "=> Start"; kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --kubeconfig kube_config_rancher-cluster.yml --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start - => End - ``` - -If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened between the hosts indicated. - -Example error output of a situation where NODE1 had the UDP ports blocked. 
- -``` -=> Start -command terminated with exit code 1 -NODE2 cannot reach NODE1 -command terminated with exit code 1 -NODE3 cannot reach NODE1 -command terminated with exit code 1 -NODE1 cannot reach NODE2 -command terminated with exit code 1 -NODE1 cannot reach NODE3 -=> End -``` diff --git a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md deleted file mode 100644 index 5f9853639..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Failed to get job complete status -weight: 20 -aliases: -- /rancher/v2.x/en/installation/troubleshooting-ha/job-complete-status/ -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.x/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. - -When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. - -### Failed to deploy addon execute job [rke-user-includes-addons]: Failed to get job complete status - -Something is wrong in the addons definitions, you can run the following command to get the root cause in the logging of the job: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=rke-user-addon-deploy-job -n kube-system -``` - -#### error: error converting YAML to JSON: yaml: line 9: - -The structure of the addons definition in `rancher-cluster.yml` is wrong. In the different resources specified in the addons section, there is a error in the structure of the YAML. The pointer `yaml line 9` references to the line number of the addon that is causing issues. - -Things to check -
- Is each base64 encoded certificate string placed directly after its key, for example `tls.crt: LS01...`? There should be no newline or space before, in between, or after the string.
- Is the YAML properly formatted? Each indentation level should be 2 spaces, as shown in the template files.
- Verify the integrity of your certificate by running `cat MyCertificate | base64 -d` on Linux or `cat MyCertificate | base64 -D` on Mac OS. If the string is not valid, the command output will tell you. A round-trip check is sketched below this list.
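For the round-trip check mentioned in the last item above, one approach is shown below. This is only a sketch; it assumes a Linux host with GNU coreutils and uses `cert.pem` as a stand-in for your certificate file name:

```
# Encode the certificate without line wrapping, as required by the template
base64 -w0 cert.pem > cert.base64

# Decode it again and compare against the original; no diff output means the string is intact
base64 -d cert.base64 | diff - cert.pem && echo "base64 string matches cert.pem"
```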
- -#### Error from server (BadRequest): error when creating "/etc/config/rke-user-addon.yaml": Secret in version "v1" cannot be handled as a Secret - -The base64 string of one of the certificate strings is wrong. The log message will try to show you what part of the string is not recognized as valid base64. - -Things to check -
- Check if the base64 string is valid by running one of the commands below:

```
# MacOS
echo BASE64_CRT | base64 -D
# Linux
echo BASE64_CRT | base64 -d
# Windows
certutil -decode FILENAME.base64 FILENAME.verify
```
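If the string decodes cleanly but the error persists, the decoded data itself may not be a valid certificate. As an optional extra check, assuming `openssl` is installed and `BASE64_CRT` again stands in for your encoded string:

```
# Decode the string and ask OpenSSL to parse it as an X.509 certificate;
# a parse error here points at bad certificate data rather than bad base64
echo BASE64_CRT | base64 -d | openssl x509 -noout -subject -enddate
```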
- -#### The Ingress "cattle-ingress-http" is invalid: spec.rules[0].host: Invalid value: "IP": must be a DNS name, not an IP address - -The host value can only contain a host name, as it is needed by the ingress controller to match the hostname and pass to the correct backend. diff --git a/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md deleted file mode 100644 index f23527b90..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md +++ /dev/null @@ -1,398 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (TCP/Layer 4) -weight: 275 -aliases: -- /rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb -- /rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb -- /rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 4 load balancer (TCP) -- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) - -In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. - -Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers -![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -
- -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -## 2. Configure Load Balancer - -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb) - ->**Note:** -> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. -> ->One caveat: do not use one of your Rancher nodes as the load balancer. - -### A. Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. - -### B. Create NGINX Configuration - -After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). - - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - - **Example NGINX config:** - ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - http { - server { - listen 80; - return 301 https://$host$request_uri; - } - } - - stream { - upstream rancher_servers { - least_conn; - server IP_NODE_1:443 max_fails=3 fail_timeout=5s; - server IP_NODE_2:443 max_fails=3 fail_timeout=5s; - server IP_NODE_3:443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -### Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
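If `dig` is available on your workstation, you can also query a specific resolver to confirm that the record has propagated. This optional check uses `rancher.yourdomain.com` as the example FQDN and Google's public resolver:

```
# Ask a public resolver directly; the answer should be the IP address of your load balancer
dig +short rancher.yourdomain.com @8.8.8.8
```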
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
]({{}}/rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-certificate) - - [Template for certificate signed by recognized CA
]({{}}/rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca) - - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} - ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -1. In `kind: Secret` with `name: cattle-keys-ingress`: - - * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) - * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - - >**Note:** - > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5T
HptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== - ``` - -2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). - - >**Note:** - > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. - - - **Step Result:** The file should look like the example below (the base64 encoded string should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - ``` - -{{% /accordion %}} - -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} - -If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. - -In the `kind: Secret` with `name: cattle-keys-ingress`: - -* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) -* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - ->**Note:** -> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: cattle-keys-ingress - namespace: cattle-system -type: Opaque -data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTB
PelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== -``` - -{{% /accordion %}} - - - -## 8. Configure FQDN - -There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). - -In the `kind: Ingress` with `name: cattle-ingress-http`: - -* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). - -After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): - -```yaml - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - rancher.yourdomain.com -``` - -Save the `.yml` file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - -``` -rke up --config rancher-cluster.yml -``` - -**Step Result:** The output should be similar to the snippet below: - -``` -INFO[0000] Building Kubernetes cluster -INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] -INFO[0000] [network] Deploying port listener containers -INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] -... -INFO[0101] Finished building Kubernetes cluster successfully -``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? 
- -You have a couple of options: - -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). - -
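Before moving on, you can point `kubectl` at the kubeconfig you backed up in step 12 (`kube_config_rancher-cluster.yml`) to confirm that the cluster RKE built is healthy. This is a minimal sanity check and assumes `kubectl` is installed on your workstation:

```
# All three nodes should report a Ready status
kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes

# The Rancher pods in the cattle-system namespace should be in Running state
kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -n cattle-system
```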
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md deleted file mode 100644 index 301684b76..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) -weight: 276 -aliases: -- /rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb -- /rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/ -- /rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 7 load balancer with SSL termination (HTTPS) -- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) - -In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. - -Rancher installed on a Kubernetes cluster with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). - -## 2. Configure Load Balancer - -When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. 
- -The load balancer has to be configured to support the following: - -* **WebSocket** connections -* **SPDY** / **HTTP/2** protocols -* Passing / setting the following headers: - -| Header | Value | Description | -|---------------------|----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. | -| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | -| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer. | -| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. | - -Health checks can be executed on the `/healthz` endpoint of the node, this will return HTTP 200. - -We have example configurations for the following load balancers: - -* [Amazon ELB configuration]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) -* [NGINX configuration]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a YAML config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`]({{}}/rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate) - - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`]({{}}/rancher/v2.x/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca) - - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) - ->**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
- -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} -If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. -{{% /accordion %}} - -## 8. Configure FQDN - -There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). - -1. Open `rancher-cluster.yml`. - -2. In the `kind: Ingress` with `name: cattle-ingress-http:` - - Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - ``` - - -3. Save the file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. 
Back Up Your RKE Config File - -After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter the `rke up` command listed below. - - ``` - rke up --config rancher-cluster.yml - ``` - - **Step Result:** The output should be similar to the snippet below: - - ``` - INFO[0000] Building Kubernetes cluster - INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] - INFO[0000] [network] Deploying port listener containers - INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] - ... - INFO[0101] Finished building Kubernetes cluster successfully - ``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. A quick way to confirm this file works is shown in the sketch after the list below. - -## What's Next? - -- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration]({{}}/rancher/v2.x/en/backups/backups/ha-backups/) to learn how to back up your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.x/en/tasks/clusters/creating-a-cluster/). - -
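Before moving on, it can be worth confirming that the auto-generated kubeconfig from step 12 actually reaches the new cluster. This is only a minimal sketch, assuming `kubectl` is installed and both files are in your current directory:

```
# Point kubectl at the kubeconfig that RKE generated
export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml
# All three nodes should report a Ready status
kubectl get nodes
```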
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/_index.md b/content/rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/_index.md deleted file mode 100644 index 31aa8c42a..000000000 --- a/content/rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/_index.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer -weight: 252 -aliases: - - /rancher/v2.x/en/installation/single-node/single-node-install-external-lb/ - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb - - /rancher/v2.x/en/installation/options/single-node-install-external-lb - - /rancher/v2.x/en/installation/single-node-install-external-lb ---- - -For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. - -A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. - -This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. - -> **Want to skip the external load balancer?** -> See [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node) instead. - -## Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) - -## Installation Outline - - - -- [1. Provision Linux Host](#1-provision-linux-host) -- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) -- [3. Configure Load Balancer](#3-configure-load-balancer) - - - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your Rancher Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Complete an Air Gap Installation? -> - Record all transactions with the Rancher API? -> -> See [Advanced Options](#advanced-options) below before continuing. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} -If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. - -> **Prerequisites:** -> Create a self-signed certificate. -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Self-Signed Cert:** - -1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. 
- - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} -If your cluster is public facing, it's best to use a certificate signed by a recognized CA. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Cert Signed by a Recognized CA:** - -If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. - -1. Enter the following command. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` - - {{% /accordion %}} - -## 3. Configure Load Balancer - -When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. - -The load balancer or proxy has to be configured to support the following: - -- **WebSocket** connections -- **SPDY** / **HTTP/2** protocols -- Passing / setting the following headers: - - | Header | Value | Description | - |--------|-------|-------------| - | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. - | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. - | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. - | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. -### Example NGINX configuration - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server rancher-server:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` - -
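After editing the NGINX configuration, it is worth validating the syntax and reloading the service before sending traffic through it. A short sketch, assuming NGINX runs as a systemd service; adjust for your init system:

```
# Check the configuration for syntax errors
sudo nginx -t
# Reload NGINX so the new configuration takes effect without dropping connections
sudo systemctl reload nginx
```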
- -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.x/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - -``` -upstream rancher { - server rancher-server:80; -} - -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 443 ssl http2; - server_name rancher.yourdomain.com; - ssl_certificate /etc/your_certificate_directory/fullchain.pem; - ssl_certificate_key /etc/your_certificate_directory/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } -} - -server { - listen 80; - server_name rancher.yourdomain.com; - return 301 https://$server_name$request_uri; -} -``` - -
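For reference, here is one way the API auditing flags listed under Advanced Options above might be combined with the basic install command. This is only a sketch: the host directory mounted for the audit log and the image tag are examples, not requirements.

```
# The host path mounted for the audit log below is an example; choose any directory
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e AUDIT_LEVEL=1 \
  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
  -e AUDIT_LOG_MAXAGE=20 \
  -e AUDIT_LOG_MAXBACKUP=20 \
  -e AUDIT_LOG_MAXSIZE=100 \
  -v /var/log/rancher/auditlog:/var/log/auditlog \
  rancher/rancher:latest
```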
- diff --git a/content/rancher/v2.x/en/installation/resources/chart-options/_index.md b/content/rancher/v2.x/en/installation/resources/chart-options/_index.md deleted file mode 100644 index 5a40f7940..000000000 --- a/content/rancher/v2.x/en/installation/resources/chart-options/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rancher Helm Chart Options -weight: 50 ---- - -The Rancher Helm chart options reference moved to [this page.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/choosing-version/_index.md b/content/rancher/v2.x/en/installation/resources/choosing-version/_index.md deleted file mode 100644 index d07a99469..000000000 --- a/content/rancher/v2.x/en/installation/resources/choosing-version/_index.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Choosing a Rancher Version -weight: 1 -aliases: - - /rancher/v2.x/en/installation/options/server-tags ---- - -This section describes how to choose a Rancher version. - -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** - -The Helm chart version also applies to RancherD installs because RancherD installs the Rancher Helm chart on a Kubernetes cluster. - -{{% tabs %}} -{{% tab "Helm Charts" %}} - -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -| Type | Command to Add the Repo | Description of the Repo | -| -------------- | ------------ | ----------------- | -| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | -| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | -| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. 
These releases are not recommended for production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless of repository, aren't supported. | - -
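For example, to follow the production recommendation above, you might add the stable repository and refresh your local chart index before searching it (a sketch; the repository URL is the one from the table above):

```
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
```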
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). - - > **Note:** The `rancher-latest` and `rancher-stable` Helm chart repositories were introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` before v2.1.0 are v2.0.4, v2.0.6, and v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond to a Rancher version tagged as `stable`. - -### Helm Chart Versions - -Rancher Helm chart versions match the Rancher version (i.e., the chart's `appVersion`). Once you've added the repo, you can search it to show the available versions with the following command:
-    `helm search repo --versions` - -If you have several repos, you can specify the repo name, e.g. `helm search repo rancher-stable/rancher --versions`
-For more information, see https://helm.sh/docs/helm/helm_search_repo/ - -To fetch a specific chart version from your chosen repo, pass the `--version` parameter, as in the following example:
-    `helm fetch rancher-stable/rancher --version=2.4.8` - -For the Rancher v2.1.x versions, there were some Helm charts where the version was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - -> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ```plain - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ```plain - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. - - ```plain - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. -{{% /tab %}} -{{% tab "Docker Images" %}} -When performing [Docker installs]({{}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -| Tag | Description | -| -------------------------- | ------ | -| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | -| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | - -> **Notes:** -> -> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. -> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. 
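As a concrete illustration of the tagging behavior described above, the commands below pull a pinned release and explicitly refresh a floating tag (the version number is only an example; check the release notes for a current one):

```
# Pull a specific, pinned Rancher version
docker pull rancher/rancher:v2.4.8
# Floating tags such as :latest must be pulled explicitly to refresh a cached image
docker pull rancher/rancher:latest
```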
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/resources/custom-ca-root-certificate/_index.md b/content/rancher/v2.x/en/installation/resources/custom-ca-root-certificate/_index.md deleted file mode 100644 index fddbc22de..000000000 --- a/content/rancher/v2.x/en/installation/resources/custom-ca-root-certificate/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: About Custom CA Root Certificates -weight: 1 -aliases: - - /rancher/v2.x/en/installation/options/custom-ca-root-certificate/ - - /rancher/v2.x/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate ---- - -If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). - -Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. - -To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. - -Examples of services that Rancher can access: - -- Catalogs -- Authentication providers -- Accessing hosting/cloud API when using Node Drivers - -## Installing with the custom CA Certificate - -For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: - -- [Docker install Custom CA certificate options]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -- [Kubernetes install options for Additional Trusted CAs]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#additional-trusted-cas) - diff --git a/content/rancher/v2.x/en/installation/resources/feature-flags/_index.md b/content/rancher/v2.x/en/installation/resources/feature-flags/_index.md deleted file mode 100644 index d208259b3..000000000 --- a/content/rancher/v2.x/en/installation/resources/feature-flags/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Enabling Experimental Features -weight: 17 -aliases: - - /rancher/v2.x/en/installation/options/feature-flags/ - - /rancher/v2.x/en/admin-settings/feature-flags/ ---- -Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. - -The features can be enabled in three ways: - -- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. -- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) in Rancher v2.3.3+ by going to the **Settings** page. 
-- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. - -Each feature has two values: - -- A default value, which can be configured with a flag or environment variable from the command line -- A set value, which can be configured with the Rancher API or UI - -If no value has been set, Rancher uses the default value. - -Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. - -For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. - -> **Note:** As of v2.4.0, there are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. - -The following is a list of the feature flags available in Rancher: - -- `dashboard`: This feature enables the new experimental UI that has a new look and feel. The dashboard also leverages a new API in Rancher which allows the UI to access the default Kubernetes resources without any intervention from Rancher. -- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. -- `proxy`: This feature enables Rancher to use a new simplified code base for the proxy, which can help enhance performance and security. The proxy feature is known to have issues with Helm deployments, which prevents any catalog applications to be deployed which includes Rancher's tools like monitoring, logging, Istio, etc. It is discontinued in Rancher v2.5. -- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. -- `fleet`: Rancher comes with [Fleet]({{}}/rancher/v2.x/en/deploy-across-clusters/fleet/) preinstalled in v2.5+. - -The below table shows the availability and default value for feature flags in Rancher: - -| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | -| ----------------------------- | ------------- | ------------ | --------------- |---| -| `dashboard` | `true` | Experimental | v2.4.0 | x | -| `dashboard` | `true` | GA* and no longer a feature flag | v2.5.0 | x | -| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | -| `istio-virtual-service-ui` | `true` | GA* | v2.3.2 | | -| `proxy` | `false` | Experimental | v2.4.0 | | -| `proxy` | N/A | Discontinued | v2.5.0 | | -| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | -| `fleet` | `true` | GA* | v2.5.0 | | - -\* Generally Available. This feature is included in Rancher and it is not experimental. - -# Enabling Features when Starting Rancher - -When you install Rancher, enable the feature you want with a feature flag. 
The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. - -> **Note:** Values set from the Rancher API will override the value passed in through the command line. - -{{% tabs %}} -{{% tab "Kubernetes Install" %}} -When installing Rancher with a Helm chart, use the `--features` option. In the below example, two features are enabled by passing the feature flag names names in a comma separated list: - -``` -helm install rancher-latest/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -### Rendering the Helm Chart for Air Gap Installations - -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher) - -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm 3 command is as follows: - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -The Helm 2 command is as follows: - -``` -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% tab "Docker Install" %}} -When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -docker run -d -p 80:80 -p 443:443 \ - --restart=unless-stopped \ - rancher/rancher:rancher-latest \ - --features==true,=true # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% /tabs %}} - -# Enabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. 
To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** - -**Result:** The feature is disabled. - -# Enabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **True.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **False.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is disabled. diff --git a/content/rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md deleted file mode 100644 index 02641bfd7..000000000 --- a/content/rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Allow Unsupported Storage Drivers -weight: 1 -aliases: - - /rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/ ---- - -This feature allows you to use types for storage providers and provisioners that are not enabled by default. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Description ----|---|--- - `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. - -### Types for Persistent Volume Plugins that are Enabled by Default -Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -### Types for StorageClass that are Enabled by Default -Below is a list of storage types for a StorageClass that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|-------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md deleted file mode 100644 index 71bd2a3cb..000000000 --- a/content/rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: UI for Istio Virtual Services and Destination Rules -weight: 2 -aliases: - - /rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui ---- - -This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. - -> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) in order to use the feature. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Status | Available as of ----|---|---|--- -`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 -`istio-virtual-service-ui` | `true` | GA | v2.3.2 - -# About this Feature - -A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. - -When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. - -The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** - -- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) -- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) - -To see these tabs, - -1. Go to the project view in Rancher and click **Resources > Istio.** -1. You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. 
Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/helm-version/_index.md b/content/rancher/v2.x/en/installation/resources/helm-version/_index.md deleted file mode 100644 index cefc37652..000000000 --- a/content/rancher/v2.x/en/installation/resources/helm-version/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Helm Version Requirements -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/helm-version - - /rancher/v2.x/en/installation/options/helm2 - - /rancher/v2.x/en/installation/options/helm2/helm-init - - /rancher/v2.x/en/installation/options/helm2/helm-rancher ---- - -This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. - -> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -- Helm v3.2.x or higher is required to install or upgrade Rancher v2.5. -- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. -- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. -- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/_index.md deleted file mode 100644 index a7004eda4..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "Don't have a Kubernetes cluster? Try one of these tutorials." -weight: 4 ---- - -This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. - -In Rancher before v2.4, the Rancher server needed to run on an RKE Kubernetes cluster. - -In Rancher v2.4.x, Rancher need to run on either an RKE Kubernetes cluster or a K3s Kubernetes cluster. - -In Rancher v2.5, Rancher can run on any Kubernetes cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE/_index.md deleted file mode 100644 index 7ede4383f..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE/_index.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Setting up a High-availability RKE Kubernetes Cluster -shortTitle: Set up RKE Kubernetes -weight: 3 -aliases: - - /rancher/v2.x/en/installation/k8s-install/kubernetes-rke ---- - - -This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. - -For Rancher before v2.4, Rancher should be installed on an RKE Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. - -As of Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. 
K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. - -> As of Rancher v2.5, Rancher can run on any Kubernetes cluster, included hosted Kubernetes solutions such as Amazon EKS. So if you are installing Rancher v2.5, The below instructions represent only one possible way to install Kubernetes. - -The Rancher management server can only be run on Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. - -For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) - -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Installing Kubernetes - -### Required CLI Tools - -Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. - -Also install [RKE,]({{}}/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. - -### 1. Create the cluster configuration file - -In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. - -Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. - -RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. 
- -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane, worker, etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Required for external TLS termination with -# ingress-nginx v0.22+ -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -
Common RKE Nodes Options
- -| Option | Required | Description | -| ------------------ | -------- | -------------------------------------------------------------------------------------- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. -> -> Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. -> -> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide]({{}}/rancher/v2.x/en/installation/options/etcd/). - -### 2. Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### 3. Test Your Cluster - -This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. - -Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. - -When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### 4. Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy are ready to continue. - -- Pods are in `Running` or `Completed` state. -- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
- -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. - -### 5. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page. - - -### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/) - diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/_index.md deleted file mode 100644 index 4fed80ea7..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Setting up a High-availability RKE2 Kubernetes Cluster for Rancher -shortTitle: Set up RKE2 for Rancher -weight: 2 ---- -_Tested on v2.5.6_ - -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) - -# Prerequisites - -These instructions assume you have set up three nodes, a load balancer, and a DNS record, as described in [this section.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) - -Note that in order for RKE2 to work correctly with the load balancer, you need to set up two listeners: one for the supervisor on port 9345, and one for the Kubernetes API on port 6443. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the RKE2 version, use the INSTALL_RKE2_VERSION environment variable when running the RKE2 installation script. -# Installing Kubernetes - -### 1. Install Kubernetes and Set up the RKE2 Server - -RKE2 server runs with embedded etcd so you will not need to set up an external datastore to run in HA mode. - -1. On the first node, you should set up the configuration file with your own pre-shared secret as the token. The token argument can be set on startup. - -If you do not specify a pre-shared secret, RKE2 will generate one and place it at /var/lib/rancher/rke2/server/node-token. - -To avoid certificate errors with the fixed registration address, you should launch the server with the tls-san parameter set. This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access via both the IP and the hostname. - -Here is an example of what the RKE2 config file (at /etc/rancher/rke2/config.yaml) would look like if you are following this guide: - -``` -token: my-shared-secret -tls-san: - - my-kubernetes-domain.com - - another-kubernetes-domain.com -``` -After that you need to run the install command and enable and start rke2: -``` -curl -sfL https://get.rke2.io | sh - -systemctl enable rke2-server.service -systemctl start rke2-server.service -``` -1. To join the rest of the nodes, you need to configure each additional node with the same shared token or the one generated automatically. 
Here is an example of the configuration file: -``` -token: my-shared-secret -server: https://:9345 -tls-san: - - my-kubernetes-domain.com - - another-kubernetes-domain.com -``` -After that you need to run the installer and enable then start rke2 -``` -curl -sfL https://get.rke2.io | sh - -systemctl enable rke2-server.service -systemctl start rke2-server.service -``` - -1. Repeat the same command on your third RKE2 server node. - -### 2. Confirm that RKE2 is Running - -Once you've launched the rke2 server process on all server nodes, ensure that the cluster has come up properly with - -``` -/var/lib/rancher/rke2/bin/kubectl \ - --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes -You should see your server nodes in the Ready state. -``` - -Then test the health of the cluster pods: -``` -/var/lib/rancher/rke2/bin/kubectl \ - --kubeconfig /etc/rancher/rke2/rke2.yaml get pods --all-namespaces -``` - -**Result:** You have successfully set up a RKE2 Kubernetes cluster. - -### 3. Save and Start Using the kubeconfig File - -When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `rke2.yaml`: - -```yml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your RKE2 cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### 4. Check the Health of Your Cluster Pods - -Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. 
- -Check that all the required pods and containers are healthy are ready to continue: - -``` - /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system etcd-ip-172-31-18-145 1/1 Running 0 4m37s -kube-system etcd-ip-172-31-25-73 1/1 Running 0 20m -kube-system etcd-ip-172-31-31-210 1/1 Running 0 9m12s -kube-system helm-install-rke2-canal-th9k9 0/1 Completed 0 21m -kube-system helm-install-rke2-coredns-6njr6 0/1 Completed 0 21m -kube-system helm-install-rke2-ingress-nginx-vztsd 0/1 Completed 0 21m -kube-system helm-install-rke2-kube-proxy-6std5 0/1 Completed 0 21m -kube-system helm-install-rke2-metrics-server-9sl7m 0/1 Completed 0 21m -kube-system kube-apiserver-ip-172-31-18-145 1/1 Running 0 4m22s -kube-system kube-apiserver-ip-172-31-25-73 1/1 Running 0 20m -kube-system kube-apiserver-ip-172-31-31-210 1/1 Running 0 9m8s -kube-system kube-controller-manager-ip-172-31-18-145 1/1 Running 0 4m8s -kube-system kube-controller-manager-ip-172-31-25-73 1/1 Running 0 21m -kube-system kube-controller-manager-ip-172-31-31-210 1/1 Running 0 8m55s -kube-system kube-proxy-57twm 1/1 Running 0 10m -kube-system kube-proxy-f7pc6 1/1 Running 0 5m24s -kube-system kube-proxy-rj4t5 1/1 Running 0 21m -kube-system kube-scheduler-ip-172-31-18-145 1/1 Running 0 4m15s -kube-system kube-scheduler-ip-172-31-25-73 1/1 Running 0 21m -kube-system kube-scheduler-ip-172-31-31-210 1/1 Running 0 8m48s -kube-system rke2-canal-4x972 2/2 Running 0 10m -kube-system rke2-canal-flh8m 2/2 Running 0 5m24s -kube-system rke2-canal-zfhkr 2/2 Running 0 21m -kube-system rke2-coredns-rke2-coredns-6cd96645d6-cmstq 1/1 Running 0 21m -kube-system rke2-ingress-nginx-controller-54946dd48f-6mp76 1/1 Running 0 20m -kube-system rke2-ingress-nginx-default-backend-5795954f8-p92xx 1/1 Running 0 20m -kube-system rke2-metrics-server-5f9b5757dc-k5sgh 1/1 Running 0 20m -``` - -**Result:** You have confirmed that you can access the cluster with `kubectl` and the RKE2 cluster is running successfully. Now the Rancher management server can be installed on the cluster. - -### 5. Configure nginx to be a daemonset - -Currently, RKE2 deploys nginx-ingress as a deployment, and that can impact the Rancher deployment so that you cannot use all servers to proxy requests to the Rancher pods. - -To rectify that, place the following file in /var/lib/rancher/rke2/server/manifests on any of the server nodes: - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChartConfig -metadata: - name: rke2-ingress-nginx - namespace: kube-system -spec: - valuesContent: |- - controller: - kind: DaemonSet - daemonset: - useHostPort: true -``` diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md deleted file mode 100644 index ab9ed12f1..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Setting up a High-availability K3s Kubernetes Cluster for Rancher -shortTitle: Set up K3s for Rancher -weight: 2 ---- - -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) - -For systems without direct internet access, refer to the air gap installation instructions. 
- -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Prerequisites - -These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. -# Installing Kubernetes - -### 1. Install Kubernetes and Set up the K3s Server - -When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. - -1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. -1. On the Linux node, run this command to start the K3s server and connect it to the external datastore: - ``` - curl -sfL https://get.k3s.io | sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: - ```sh - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - Note: The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. - -1. Repeat the same command on your second K3s server node. - -### 2. Confirm that K3s is Running - -To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: -``` -sudo k3s kubectl get nodes -``` - -Then you should see two nodes with the master role: -``` -ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 -ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 -``` - -Then test the health of the cluster pods: -``` -sudo k3s kubectl get pods --all-namespaces -``` - -**Result:** You have successfully set up a K3s Kubernetes cluster. - -### 3. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. 
In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -```yml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### 4. Check the Health of Your Cluster Pods - -Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. - -Check that all the required pods and containers are healthy are ready to continue: - -``` -ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d -kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d -kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d -``` - -**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/_index.md deleted file mode 100644 index d10dfce29..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: About High-availability Installations -weight: 1 ---- - -We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. - -In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. - -Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. - -The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. 
- -For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture) - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md deleted file mode 100644 index b9406d840..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. -shortTitle: Infrastructure Tutorials -weight: 5 ---- - -To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - - -To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/) diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md deleted file mode 100644 index 2e01e8158..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Setting up Nodes in Amazon EC2 -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/ec2-node ---- - -In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. - -If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. - -If the Rancher server is installed in a single Docker container, you only need one instance. - -### 1. Optional Preparation - -- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.]({{}}/rancher/v2.x/en/installation/requirements/#port-requirements) - -### 2. Provision Instances - -1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. -1. In the left panel, click **Instances.** -1. Click **Launch Instance.** -1. 
In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** -1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. -1. Click **Next: Configure Instance Details.** -1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. -1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. -1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** -1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements]({{}}/rancher/v2.x/en/installation/requirements/#port-requirements) for Rancher nodes. -1. Click **Review and Launch.** -1. Click **Launch.** -1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. -1. Click **Launch Instances.** - -**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. - -**Note:** If the nodes are being used for an RKE Kubernetes cluster, install Docker on each node in the next step. For a K3s Kubernetes cluster, the nodes are now ready to install K3s. - -### 3. Install Docker and Create User for RKE Kubernetes Cluster Nodes - -1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. -1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** -1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: -``` -sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] -``` -1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: -``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh -``` -1. When you are connected to the instance, run the following command on the instance to add user `ubuntu` to group `docker`: -``` -sudo usermod -aG docker ubuntu -``` -1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. - -> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. - -**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. - -### Next Steps for RKE Kubernetes Cluster Nodes - -If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. - -RKE will also need access to the private key to connect to each node. 
Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md deleted file mode 100644 index 3f940e03a..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' -weight: 1 ---- - -This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. - -The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -For more information about each installation option, refer to [this page.]({{}}/rancher/v2.x/en/installation) - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. We recommend MySQL. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. - -When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the MySQL database, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/rds/) for setting up MySQL on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. 
- -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
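If you want to sanity-check the record before moving on, a quick DNS lookup from any workstation shows whether it resolves to the load balancer. This is only a sketch; `rancher.example.com` stands in for whatever hostname you chose:

```
# Hypothetical hostname; replace it with the DNS record you created.
# An A record should return the load balancer IP; a CNAME should return
# the load balancer hostname followed by its IP.
dig +short rancher.example.com
```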
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md deleted file mode 100644 index a6e267c4a..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' -weight: 2 ---- - -This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. 
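Once Rancher has been installed in that later step, you can verify this wiring with `kubectl`. The sketch below assumes a default Helm installation, in which the chart creates an Ingress named `rancher` in the `cattle-system` namespace; adjust the names if your setup differs:

```
# Show the Ingress created by the Rancher chart and the hostname it answers on.
kubectl -n cattle-system get ingress rancher
kubectl -n cattle-system describe ingress rancher
```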
- -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md deleted file mode 100644 index cd1215d46..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability RKE2 Kubernetes Cluster' -weight: 1 ---- - -This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. - -The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a RKE2 Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability RKE2 cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.x/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.x/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on all nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE2 tool will deploy an Nginx Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Nginx Ingress controller to listen for traffic destined for the Rancher hostname. The Nginx Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. 
-- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md deleted file mode 100644 index 04cb28f55..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Setting up an NGINX Load Balancer -weight: 4 -aliases: - - /rancher/v2.x/en/installation/options/nginx ---- - -NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. - -In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. - -One caveat: do not use one of your Rancher nodes as the load balancer. - -> These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. 
- -## Install NGINX - -Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. - -## Create NGINX Configuration - -After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your nodes. - - > **Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. - -
Example NGINX config
- ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - stream { - upstream rancher_servers_http { - least_conn; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - } - server { - listen 80; - proxy_pass rancher_servers_http; - } - - upstream rancher_servers_https { - least_conn; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers_https; - } - - } - ``` - - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -## Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md deleted file mode 100644 index 39e4ca0fb..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Setting up Amazon ELB Network Load Balancer -weight: 5 -aliases: - - /rancher/v2.x/en/installation/ha/create-nodes-lb/nlb - - /rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb - - /rancher/v2.x/en/installation/options/nlb ---- - -This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. - -These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. - -Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. - -# Setting up the Load Balancer - -Configuring an Amazon NLB is a multistage process: - -1. [Create Target Groups](#1-create-target-groups) -2. [Register Targets](#2-register-targets) -3. [Create Your NLB](#3-create-your-nlb) -4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) - -# Requirements - -These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. - -# 1. 
Create Target Groups - -Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. - -Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. - -1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -1. Click **Create target group** to create the first target group, regarding TCP port 443. - -> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. - -| Option | Setting | -|-------------------|-------------------| -| Target Group Name | `rancher-tcp-443` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `443` | -| VPC | Choose your VPC | - -Health check settings: - -| Option | Setting | -|---------------------|-----------------| -| Protocol | TCP | -| Port | `override`,`80` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. - -| Option | Setting | -|-------------------|------------------| -| Target Group Name | `rancher-tcp-80` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `80` | -| VPC | Choose your VPC | - - -Health check settings: - -| Option |Setting | -|---------------------|----------------| -| Protocol | TCP | -| Port | `traffic port` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -# 2. Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -# 3. Create Your NLB - -Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. Then complete each form. - -- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) -- [Step 2: Configure Routing](#step-2-configure-routing) -- [Step 3: Register Targets](#step-3-register-targets) -- [Step 4: Review](#step-4-review) - -### Step 1: Configure Load Balancer - -Set the following fields in the form: - -- **Name:** `rancher` -- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. -- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. -- **Availability Zones:** Select Your **VPC** and **Availability Zones**. - -### Step 2: Configure Routing - -1. From the **Target Group** drop-down, choose **Existing target group**. -1. From the **Name** drop-down, choose `rancher-tcp-443`. -1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -### Step 3: Register Targets - -Since you registered your targets earlier, all you have to do is click **Next: Review**. - -### Step 4: Review - -Look over the load balancer details and click **Create** when you're satisfied. - -After AWS creates the NLB, click **Close**. - -# 4. Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. - -# Health Check Paths for NGINX Ingress and Traefik Ingresses - -K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. - -For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. - -- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. -- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. 
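As a rough spot check of the two endpoints described above, you can query them directly on any node once the cluster is up. This is a sketch only; `NODE_IP` is a placeholder for one of your Linux nodes, and `-k` is used because the Ingress may still be serving a self-signed certificate:

```
# Traefik (K3s) health endpoint
curl -sk https://NODE_IP/ping
# NGINX Ingress (RKE) default backend health endpoint
curl -sk https://NODE_IP/healthz
```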
- -To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md deleted file mode 100644 index ad9bd9055..000000000 --- a/content/rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Setting up a MySQL Database in Amazon RDS -weight: 4 -aliases: - - /rancher/v2.x/en/installation/options/rds ---- -This tutorial describes how to set up a MySQL database in Amazon's RDS. - -This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. - -1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. In the left panel, click **Databases.** -1. Click **Create database.** -1. In the **Engine type** section, click **MySQL.** -1. In the **Version** section, choose **MySQL 5.7.22.** -1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. -1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. -1. Click **Create database.** - -You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. - -To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. - -- **Username:** Use the admin username. -- **Password:** Use the admin password. -- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. -- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. -- **Database name:** Confirm the name by going to the **Configuration** tab. 
The name is listed under **DB name.** - -This information will be used to connect to the database in the following format: - -``` -mysql://username:password@tcp(hostname:3306)/database-name -``` - -For more information on configuring the datastore for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) diff --git a/content/rancher/v2.x/en/installation/resources/local-system-charts/_index.md b/content/rancher/v2.x/en/installation/resources/local-system-charts/_index.md deleted file mode 100644 index f4f70dc77..000000000 --- a/content/rancher/v2.x/en/installation/resources/local-system-charts/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Setting up Local System Charts for Air Gapped Installations -weight: 120 -aliases: - - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md - - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md - - /rancher/v2.x/en/installation/options/local-system-charts ---- - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. - -In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag in Rancher v2.3.0, and using a Git mirror for Rancher versions before v2.3.0. - -# Using Local System Charts in Rancher v2.3.0 - -In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. - -Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/) instructions. - -# Setting Up System Charts for Rancher Before v2.3.0 - -### A. Prepare System Charts - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. - -Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. - -### B. Configure System Charts - -Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. - -{{% tabs %}} -{{% tab "Rancher UI" %}} - -In the catalog management page in the Rancher UI, follow these steps: - -1. Go to the **Global** view. - -1. Click **Tools > Catalogs.** - -1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **⋮ > Edit.** - -1. In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. - -1. 
Click **Save.** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% tab "Rancher API" %}} - -1. Log into Rancher. - -1. Open `https:///v3/catalogs/system-library` in your browser. - - {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} - -1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - - {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} - -1. Click **Show Request** - -1. Click **Send Request** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/resources/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/resources/tls-secrets/_index.md deleted file mode 100644 index da47ec64d..000000000 --- a/content/rancher/v2.x/en/installation/resources/tls-secrets/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Adding TLS Secrets -weight: 2 -aliases: - - /rancher/v2.x/en/installation/options/tls-secrets/ - - /rancher/v2.x/en/installation/resources/encryption/tls-secrets ---- - -Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. - -Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. - -For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. -This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. - -Use `kubectl` with the `tls` secret type to create the secrets. - -``` -kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. - -# Using a Private CA Signed Certificate - -If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. - -Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. - -``` -kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem=./cacerts.pem -``` - -> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. - -# Updating a Private CA Certificate - -Follow the steps on [this page]({{}}/rancher/v2.x/en/installation/resources/update-ca-cert) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. 
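If you want to confirm which certificate is currently stored in the secret, for example after replacing it, one way is to decode it with `openssl`. This is a sketch rather than part of the official procedure:

```
# Print the subject, issuer, and validity dates of the certificate held in
# the tls-rancher-ingress secret.
kubectl -n cattle-system get secret tls-rancher-ingress -o jsonpath='{.data.tls\.crt}' \
  | base64 -d | openssl x509 -noout -subject -issuer -dates
```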
\ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/tls-settings/_index.md deleted file mode 100644 index 46094ee1a..000000000 --- a/content/rancher/v2.x/en/installation/resources/tls-settings/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: TLS Settings -weight: 3 -aliases: - - /rancher/v2.x/en/installation/options/tls-settings/ - - /rancher/v2.x/en/admin-settings/tls-settings - - /rancher/v2.x/en/installation/resources/encryption/tls-settings ---- - -In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 and secure TLS cipher suites. TLS 1.3 and TLS 1.3-exclusive cipher suites are not supported. - -# Configuring TLS settings - -TLS settings are configured by passing environment variables to the Rancher server container. See the following for how to set them on your installation. - -- [TLS settings in Docker options]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#tls-settings) - -- [TLS settings in Helm chart options]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/#tls-settings) - -# TLS Environment Variables - -| Parameter | Description | Default | Available options | -|-----|-----|-----|-----| -| `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2` | -| `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) | - - -# Legacy configuration - -If you need to configure TLS the same way as it was before Rancher v2.1.7, please use the following settings: - - -| Parameter | Legacy value | -|-----|-----| -| `CATTLE_TLS_MIN_VERSION` | `1.0` | -| `CATTLE_TLS_CIPHERS` | `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,`
`TLS_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_RSA_WITH_AES_128_CBC_SHA,`
`TLS_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,`
`TLS_RSA_WITH_3DES_EDE_CBC_SHA` diff --git a/content/rancher/v2.x/en/installation/resources/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/resources/troubleshooting/_index.md deleted file mode 100644 index 1d367a12a..000000000 --- a/content/rancher/v2.x/en/installation/resources/troubleshooting/_index.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Troubleshooting the Rancher Server Kubernetes Cluster -weight: 276 -aliases: - - /rancher/v2.x/en/installation/k8s-install/helm-rancher/troubleshooting - - /rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting - - /rancher/v2.x/en/installation/k8s-install/kubernetes-rke/troubleshooting - - /rancher/v2.x/en/installation/options/troubleshooting ---- - -This section describes how to troubleshoot an installation of Rancher on a Kubernetes cluster. - -### Relevant Namespaces - -Most of the troubleshooting will be done on objects in these 3 namespaces. - -- `cattle-system` - `rancher` deployment and pods. -- `ingress-nginx` - Ingress controller pods and services. -- `cert-manager` - `cert-manager` pods. - -### "default backend - 404" - -A number of things can cause the ingress-controller not to forward traffic to your rancher instance. Most of the time its due to a bad ssl configuration. - -Things to check - -- [Is Rancher Running](#check-if-rancher-is-running) -- [Cert CN is "Kubernetes Ingress Controller Fake Certificate"](#cert-cn-is-kubernetes-ingress-controller-fake-certificate) - -### Check if Rancher is Running - -Use `kubectl` to check the `cattle-system` system namespace and see if the Rancher pods are in a Running state. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -If the state is not `Running`, run a `describe` on the pod and check the Events. - -``` -kubectl -n cattle-system describe pod - -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 11m default-scheduler Successfully assigned rancher-784d94f59b-vgqzh to localhost - Normal SuccessfulMountVolume 11m kubelet, localhost MountVolume.SetUp succeeded for volume "rancher-token-dj4mt" - Normal Pulling 11m kubelet, localhost pulling image "rancher/rancher:v2.0.4" - Normal Pulled 11m kubelet, localhost Successfully pulled image "rancher/rancher:v2.0.4" - Normal Created 11m kubelet, localhost Created container - Normal Started 11m kubelet, localhost Started container -``` - -### Check the Rancher Logs - -Use `kubectl` to list the pods. - -``` -kubectl -n cattle-system get pods - -NAME READY STATUS RESTARTS AGE -pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m -``` - -Use `kubectl` and the pod name to list the logs from the pod. - -``` -kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh -``` - -### Cert CN is "Kubernetes Ingress Controller Fake Certificate" - -Use your browser to check the certificate details. If it says the Common Name is "Kubernetes Ingress Controller Fake Certificate", something may have gone wrong with reading or issuing your SSL cert. - -> **Note:** if you are using LetsEncrypt to issue certs it can sometimes take a few minutes to issue the cert. - -### Checking for issues with cert-manager issued certs (Rancher Generated or LetsEncrypt) - -`cert-manager` has 3 parts. - -- `cert-manager` pod in the `cert-manager` namespace. -- `Issuer` object in the `cattle-system` namespace. -- `Certificate` object in the `cattle-system` namespace. 
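A quick way to confirm that all three pieces exist before digging into their events is to list them directly. This is a sketch; adjust the namespaces if cert-manager was installed elsewhere:

```
# The cert-manager controller pods
kubectl -n cert-manager get pods

# The Issuer and Certificate objects used for the Rancher certificate
kubectl -n cattle-system get issuer,certificate
```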
- -Work backwards and do a `kubectl describe` on each object and check the events. You can track down what might be missing. - -For example there is a problem with the Issuer: - -``` -kubectl -n cattle-system describe certificate -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning IssuerNotReady 18s (x23 over 19m) cert-manager Issuer rancher not ready -``` - -``` -kubectl -n cattle-system describe issuer -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning ErrInitIssuer 19m (x12 over 19m) cert-manager Error initializing issuer: secret "tls-rancher" not found - Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found -``` - -### Checking for Issues with Your Own SSL Certs - -Your certs get applied directly to the Ingress object in the `cattle-system` namespace. - -Check the status of the Ingress object and see if its ready. - -``` -kubectl -n cattle-system describe ingress -``` - -If its ready and the SSL is still not working you may have a malformed cert or secret. - -Check the nginx-ingress-controller logs. Because the nginx-ingress-controller has multiple containers in its pod you will need to specify the name of the container. - -``` -kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller -... -W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found -``` - -### No matches for kind "Issuer" - -The SSL configuration option you have chosen requires cert-manager to be installed before installing Rancher or else the following error is shown: - -``` -Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1" -``` - -Install cert-manager and try installing Rancher again. - - -### Canal Pods show READY 2/3 - -The most common cause of this issue is port 8472/UDP is not open between the nodes. Check your local firewall, network routing or security groups. - -Once the network issue is resolved, the `canal` pods should timeout and restart to establish their connections. - -### nginx-ingress-controller Pods show RESTARTS - -The most common cause of this issue is the `canal` pods have failed to establish the overlay network. See [canal Pods show READY `2/3`](#canal-pods-show-ready-2-3) for troubleshooting. - - -### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed) - -Some causes of this error include: - -* User specified to connect with does not have permission to access the Docker socket. This can be checked by logging into the host and running the command `docker ps`: - -``` -$ ssh user@server -user@server$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. 
- -* SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: -``` -$ nc xxx.xxx.xxx.xxx 22 -SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 -``` - -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found - -The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file. - -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain - -The key file specified as `ssh_key_path` is not correct for accessing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. - -### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys - -If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. If the `SSH_AUTH_SOCK` environment variable is found in the environment where the `rke` command is run, it will be used automatically to connect to the node. - -### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? - -The node is not reachable on the configured `address` and `port`. diff --git a/content/rancher/v2.x/en/installation/resources/update-ca-cert/_index.md b/content/rancher/v2.x/en/installation/resources/update-ca-cert/_index.md deleted file mode 100644 index 1b9368fe8..000000000 --- a/content/rancher/v2.x/en/installation/resources/update-ca-cert/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Updating a Private CA Certificate -weight: 10 ---- - -Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. - -A summary of the steps is as follows: - -1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. -2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). -3. Update the Rancher installation using the Helm CLI. -4. Reconfigure the Rancher agents to trust the new CA certificate. - -The details of these instructions are below. - -# 1. Create/update the certificate secret resource - -First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. - -If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -Alternatively, to update an existing certificate secret: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -# 2. 
Create/update the CA certificate secret resource - -If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca secret` in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). - -To create the initial secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem -``` - -To update an existing `tls-ca` secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -# 3. Reconfigure the Rancher deployment - -> Before proceeding, generate an API token in the Rancher UI (User > API & Keys) and save the Bearer Token which you might need in step 4. - -This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). - -It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. - -To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: - -``` -$ helm get values rancher -n cattle-system -``` - -Also get the version string of the currently deployed Rancher chart: - -``` -$ helm ls -A -``` - -Upgrade the Helm application instance using the original configuration values and making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade. - -If the certificate was signed by a private CA, add the `set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates. - -``` -helm upgrade rancher rancher-stable/rancher \ - --namespace cattle-system \ - --version \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set ... -``` - -When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written in the `tls-ca` secret earlier. - -# 4. Reconfigure Rancher agents to trust the private CA - -This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true: - -- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`) -- The root CA certificate for the new custom certificate has changed - -### Why is this step required? - -When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on Rancher server side, the environvment variable `CATTLE_CA_CHECKSUM` must be updated accordingly. - -### Which method should I choose? - -Method 1 is the easiest one but requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (Step 3). 
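To see which clusters Rancher currently manages, and their IDs, before picking a method, you can list them on the Rancher management cluster. This is a sketch, run with a kubeconfig pointing at the local (management) cluster; the columns shown vary by Rancher version:

```
# The names printed here (c-xxxxx and local) are the cluster IDs used by the patch command in Method 1
kubectl get clusters.management.cattle.io
```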
- -If the clusters have lost connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.x/en/cluster-admin/cluster-access/ace/) enabled, then go with method 2. - -Method 3 can be used as a fallback if method 1 and 2 are unfeasible. - -### Method 1: Kubectl command - -For each cluster under Rancher management (including `local`) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). - -``` -kubectl patch clusters -p '{"status":{"agentImage":"dummy"}}' --type merge -``` - -This command will cause all Agent Kubernetes resources to be reconfigured with the checksum of the new certificate. - - -### Method 2: Manually update checksum - -Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: - -``` -$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp -$ sha256sum cacert.tmp | awk '{print $1}' -``` - -Using a Kubeconfig for each downstream cluster update the environment variable for the two agent deployments. - -``` -$ kubectl edit -n cattle-system ds/cattle-node-agent -$ kubectl edit -n cattle-system deployment/cluster-agent -``` - -### Method 3: Recreate Rancher agents - -With this method you are recreating the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. - -First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 - -Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: -https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/_index.md b/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/_index.md deleted file mode 100644 index f48dea027..000000000 --- a/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/_index.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: Upgrading Cert-Manager -weight: 4 -aliases: - - /rancher/v2.x/en/installation/options/upgrading-cert-manager - - /rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.x/en/installation/resources/encryption/upgrading-cert-manager ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. 
Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. - -# Upgrade Cert-Manager - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions) - -In order to upgrade cert-manager, follow these instructions: - -### Option A: Upgrade cert-manager with Internet Access - -{{% accordion id="normal" label="Click to expand" %}} -1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) - - ```plain - helm uninstall cert-manager - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed - - ```plain - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. 
Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager if needed - - ```plain - kubectl create namespace cert-manager - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.12.0 - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Option B: Upgrade cert-manager in an Air Gap Environment - -{{% accordion id="airgap" label="Click to expand" %}} - -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - The Helm 3 command is as follows: - - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - - The Helm 2 command is as follows: - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager (old and new) - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. 
Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n cert-manager \ - delete deployment,sa,clusterrole,clusterrolebinding \ - -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f cert-manager/cert-manager-crd-old.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager - - ```plain - kubectl create namespace cert-manager - ``` - -1. Install cert-manager - - ```plain - kubectl -n cert-manager apply -R -f ./cert-manager - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Verify the Deployment - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. 
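One way to check which API group a running installation is serving is to look at the installed CustomResourceDefinitions. This is a sketch; only the group matching your installed cert-manager version will show up:

```plain
# Pre-v0.11 installations register CRDs under certmanager.k8s.io, v0.11+ uses cert-manager.io
kubectl get crd | grep -E 'certmanager.k8s.io|cert-manager.io'
```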
- -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). - -More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). - diff --git a/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md deleted file mode 100644 index 73cca9cd3..000000000 --- a/content/rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Upgrading Cert-Manager with Helm 2 -weight: 2040 -aliases: - - /rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.x/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's offficial documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. 
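Before choosing a path, it helps to confirm which cert-manager version is actually running. This is a sketch that assumes cert-manager was installed into `kube-system` with Helm 2 and carries the `app=cert-manager` label used elsewhere on this page; adjust the namespace if yours differs:

```plain
# Helm 2 release name and chart version
helm list cert-manager

# Image tag of the running controller
kubectl -n kube-system get pods -l app=cert-manager \
  -o jsonpath='{.items[*].spec.containers[*].image}'
```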
- -## Upgrade Cert-Manager Only - -> **Note:** -> These instructions are applied if you have no plan to upgrade Rancher. - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -In order to upgrade cert-manager, follow these instructions: - -{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}} -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. Delete the existing deployment - - ```plain - helm delete --purge cert-manager - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager - ``` -{{% /accordion %}} - -{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}} -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace kube-system \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. 
Delete the existing cert-manager installation - - ```plain - kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - -1. Install cert-manager - - ```plain - kubectl -n kube-system apply -R -f ./cert-manager - ``` -{{% /accordion %}} - - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace kube-system - -NAME READY STATUS RESTARTS AGE -cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m -cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m -cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m -``` - -If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. - -> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. Here are additional resources that explain why this is necessary: -> -> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) -> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). - -For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). 
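After migrating, you can confirm that your Issuer and Certificate resources are served from the new API group by querying it explicitly. This is a sketch; the fully qualified resource names only resolve against whichever CRD group is actually installed:

```plain
kubectl get issuers.cert-manager.io,clusterissuers.cert-manager.io,certificates.cert-manager.io \
  --all-namespaces
```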
diff --git a/content/rancher/v2.x/en/istio/_index.md b/content/rancher/v2.x/en/istio/_index.md deleted file mode 100644 index 0534d1d2b..000000000 --- a/content/rancher/v2.x/en/istio/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Istio -weight: 15 -aliases: - - /rancher/v2.x/en/dashboard/istio ---- - -In Rancher 2.5, the Istio application was improved. - - -If you are using Rancher v2.5, refer to the Istio documentation [here.]({{}}/rancher/v2.x/en/istio/v2.5) - -If you are using Rancher v2.3-v2.4, refer to the Istio documentation [here.]({{}}/rancher/v2.x/en/istio/v2.3.x-v2.4.x) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/_index.md deleted file mode 100644 index cd5d22c17..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Istio in Rancher v2.3-v2.4 -shortTitle: Rancher v2.3-v2.4 -weight: 2 -aliases: - - /rancher/v2.x/en/project-admin/istio/configuring-resource-allocations/ - - /rancher/v2.x/en/cluster-admin/tools/istio/ - - /rancher/v2.x/en/project-admin/istio - - /rancher/v2.x/en/istio/legacy/cluster-istio ---- -_Available as of v2.3.0_ - -> In Rancher 2.5, the Istio application was improved. There are now two ways to enable Istio. The older way is documented in this section, and the new application for Istio is documented [here.]({{}}/rancher/v2.x/en/istio/v2.5) - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - -As a network of microservices changes and grows, the interactions between them can become more difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. - -Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -This service mesh provides features that include but are not limited to the following: - -- Traffic management features -- Enhanced monitoring and tracing -- Service discovery and routing -- Secure connections and service-to-service authentication with mutual TLS -- Load balancing -- Automatic retries, backoff, and circuit breaking - -After Istio is enabled in a cluster, you can leverage Istio's control plane functionality with `kubectl`. - -Rancher's Istio integration comes with comprehensive visualization aids: - -- **Trace the root cause of errors with Jaeger.** [Jaeger](https://www.jaegertracing.io/) is an open-source tool that provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. Distributed tracing allows you to view an entire chain of calls, which might originate with a user request and traverse dozens of microservices. -- **Get the full picture of your microservice architecture with Kiali.** [Kiali](https://www.kiali.io/) provides a diagram that shows the services within a service mesh and how they are connected, including the traffic rates and latencies between them. 
You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. -- **Gain insights from time series analytics with Grafana dashboards.** [Grafana](https://grafana.com/) is an analytics platform that allows you to query, visualize, alert on and understand the data gathered by Prometheus. -- **Write custom queries for time series data with the Prometheus UI.** [Prometheus](https://prometheus.io/) is a systems monitoring and alerting toolkit. Prometheus scrapes data from your cluster, which is then used by Grafana. A Prometheus UI is also integrated into Rancher, and lets you write custom queries for time series data and see the results in the UI. - - -Istio needs to be set up by a Rancher administrator or cluster administrator before it can be used in a project. - -# Prerequisites - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. - -# Setup Guide - -Refer to the [setup guide]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project. - -# Disabling Istio - -To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio) - -# Accessing Visualizations - -> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, see [this section.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/rbac/) - -After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. - -Your access to the visualizations depend on your role. Grafana and Prometheus are only available for cluster owners. The Kiali and Jaeger UIs are available only to cluster owners by default, but cluster owners can allow project members to access them by editing the Istio settings. When you go to your project and click **Resources > Istio,** you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. - -To see the visualizations, go to the cluster where Istio is set up and click **Tools > Istio.** You should see links to each UI at the top of the page. - -You can also get to the visualization tools from the project view. - -# Viewing the Kiali Traffic Graph - -1. From the project view in Rancher, click **Resources > Istio.** -1. If you are a cluster owner, you can go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. - -# Viewing Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** Cluster owners can see all of the metrics, while project members can see a subset of the metrics. - -# Architecture - -Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. 
These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. - -Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. - -Enabling Istio in Rancher enables monitoring in the cluster, and enables Istio in all new namespaces that are created in a cluster. You need to manually enable Istio in preexisting namespaces. - -When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. - -For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). - -### Two Ingresses - -By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. - -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/disabling-istio/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/disabling-istio/_index.md deleted file mode 100644 index 0c8113c11..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/disabling-istio/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Disabling Istio -weight: 4 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio - - /rancher/v2.x/en/istio/legacy/disabling-istio ---- - -This section describes how to disable Istio in a cluster, namespace, or workload. - -# Disable Istio in a Cluster - -To disable Istio, - -1. From the **Global** view, navigate to the cluster that you want to disable Istio for. -1. Click **Tools > Istio.** -1. Click **Disable,** then click the red button again to confirm the disable action. - -**Result:** The `cluster-istio` application in the cluster's `system` project gets removed. The Istio sidecar cannot be deployed on any workloads in the cluster. - -# Disable Istio in a Namespace - -1. In the Rancher UI, go to the project that has the namespace where you want to disable Istio. -1. On the **Workloads** tab, you will see a list of namespaces and the workloads deployed in them. Go to the namespace where you want to disable and click the **⋮ > Disable Istio Auto Injection.** - -**Result:** When workloads are deployed in this namespace, they will not have the Istio sidecar. - -# Remove the Istio Sidecar from a Workload - -Disable Istio in the namespace, then redeploy the workloads with in it. They will be deployed without the Istio sidecar. \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/rbac/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/rbac/_index.md deleted file mode 100644 index 05a9a3db0..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/rbac/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Role-based Access Control -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/rbac - - /rancher/v2.x/en/istio/legacy/rbac ---- - -This section describes the permissions required to access Istio features and how to configure access to the Kiali and Jaeger visualizations. 
- -# Cluster-level Access - -By default, only cluster administrators can: - -- Enable Istio for the cluster -- Configure resource allocations for Istio -- View each UI for Prometheus, Grafana, Kiali, and Jaeger - -# Project-level Access - -After Istio is enabled in a cluster, project owners and members have permission to: - -- Enable and disable Istio sidecar auto-injection for namespaces -- Add the Istio sidecar to workloads -- View the traffic metrics and traffic graph for the cluster -- View the Kiali and Jaeger visualizations if cluster administrators give access to project members -- Configure Istio's resources (such as the gateway, destination rules, or virtual services) with `kubectl` (This does not apply to read-only project members) - -# Access to Visualizations - -By default, the Kiali and Jaeger visualizations are restricted to the cluster owner because the information in them could be sensitive. - -**Jaeger** provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. - -**Kiali** provides a diagram that shows the services within a service mesh and how they are connected. - -Rancher supports giving groups permission to access Kiali and Jaeger, but not individuals. - -To configure who has permission to access the Kiali and Jaeger UI, - -1. Go to the cluster view and click **Tools > Istio.** -1. Then go to the **Member Access** section. If you want to restrict access to certain groups, choose **Allow cluster owner and specified members to access Kiali and Jaeger UI.** Search for the groups that you want to have access to Kiali and Jaeger. If you want all members to have access to the tools, click **Allow all members to access Kiali and Jaeger UI.** -1. Click **Save.** - -**Result:** The access levels for Kiali and Jaeger have been updated. - -# Summary of Default Permissions for Istio Users - -| Permission | Cluster Administrators | Project Owners | Project Members | Read-only Project Members | -|------------------------------------------|----------------|----------------|-----------------|---------------------------| -| Enable and disable Istio for the cluster | ✓ | | | | -| Configure Istio resource limits | ✓ | | | | -| Control who has access to Kiali and the Jaeger UI | ✓ | | | | -| Enable and disable Istio for a namespace | ✓ | ✓ | ✓ | | -| Enable and disable Istio on workloads | ✓ | ✓ | ✓ | | -| Configure Istio with `kubectl` | ✓ | ✓ | ✓ | | -| View Prometheus UI and Grafana UI | ✓ | | | | -| View Kiali UI and Jaeger UI ([Configurable](#access-to-visualizations)) | ✓ | | | | -| View Istio project dashboard, including traffic metrics* | ✓ | ✓ | ✓ | ✓ | - -* By default, only the cluster owner will see the traffic graph. Project members will see only a subset of traffic metrics. Project members cannot see the traffic graph because it comes from Kiali, and access to Kiali is restricted to cluster owners by default. 
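If you need to verify what a given user can actually do against Istio's custom resources, `kubectl auth can-i` gives a quick answer. This is a sketch with hypothetical user and namespace names; running it with `--as` requires impersonation rights:

```
# Can this project member create Istio routing rules in their namespace?
kubectl auth can-i create virtualservices.networking.istio.io -n my-namespace --as jane

# A read-only project member is expected to get "no" here
kubectl auth can-i update destinationrules.networking.istio.io -n my-namespace --as jane-readonly
```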
\ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/release-notes/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/release-notes/_index.md deleted file mode 100644 index 4c17bf283..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/release-notes/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Release Notes -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/release-notes - - /rancher/v2.x/en/istio/legacy/release-notes ---- - - -# Istio 1.5.8 - -### Important note on 1.5.x versions - -When upgrading from any 1.4 version of Istio to any 1.5 version, the Rancher installer will delete several resources in order to complete the upgrade, at which point they will be immediately re-installed. This includes the `istio-reader-service-account`. If your Istio installation is using this service account be aware that any secrets tied to the service account will be deleted. Most notably this will **break specific [multi-cluster deployments](https://archive.istio.io/v1.4/docs/setup/install/multicluster/)**. Downgrades back to 1.4 are not possible. - -See the official upgrade notes for additional information on the 1.5 release and upgrading from 1.4: https://istio.io/latest/news/releases/1.5.x/announcing-1.5/upgrade-notes/ - -> **Note:** Rancher continues to use the Helm installation method, which produces a different architecture from an istioctl installation. - -### Known Issues - -* The Kiali traffic graph is currently not working [#24924](https://github.com/istio/istio/issues/24924) diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/resources/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/resources/_index.md deleted file mode 100644 index babbb1229..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/resources/_index.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: CPU and Memory Allocations -weight: 1 -aliases: - - /rancher/v2.x/en/project-admin/istio/configuring-resource-allocations/ - - /rancher/v2.x/en/project-admin/istio/config/ - - /rancher/v2.x/en/cluster-admin/tools/istio/resources - - /rancher/v2.x/en/istio/legacy/resources ---- -_Available as of v2.3.0_ - -This section describes the minimum recommended computing resources for the Istio components in a cluster. - -The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. - -> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. - -The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each central Istio component. - -In Kubernetes, the resource request indicates that the workload will not deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. 
For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) - -Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|-----------|---------------|---------------|-------------|-------------|------------- -istio-pilot |discovery| 500m | 2048Mi | 1000m | 4096Mi | Y - istio-telemetry |mixer| 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-policy | mixer | 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-tracing | jaeger | 100m | 100Mi | 500m | 1024Mi | Y - prometheus | prometheus | 750m | 750Mi | 1000m | 1024Mi | Y - grafana | grafana | 100m | 100Mi | 200m | 512Mi | Y - Others | - | 500m | 500Mi | - | - | N - **Total** | **-** | **3950m** | **5546Mi** | **>12300m** | **>14848Mi** | **-** - - -# Configuring Resource Allocations - -You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. - -To make it easier to schedule the workloads to a node, a cluster administrator can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. - -You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/docs/concepts/what-is-istio). - -To configure the resources allocated to an Istio component, - -1. In Rancher, go to the cluster where you have Istio installed. -1. Click **Tools > Istio.** This opens the Istio configuration page. -1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. -1. Click **Save.** - -**Result:** The resource allocations for the Istio components are updated. - -## Pilot - -[Pilot](https://istio.io/docs/ops/deployment/architecture/#pilot) provides the following: - -- Authentication configuration -- Service discovery for the Envoy sidecars -- Traffic management capabilities for intelligent routing (A/B tests and canary rollouts) -- Configuration for resiliency (timeouts, retries, circuit breakers, etc) - -For more information on Pilot, refer to the [documentation](https://istio.io/docs/concepts/traffic-management/#pilot-and-envoy). - -Option | Description| Required | Default --------|------------|-------|------- -Pilot CPU Limit | CPU resource limit for the istio-pilot pod.| Yes | 1000 -Pilot CPU Reservation | CPU reservation for the istio-pilot pod. | Yes | 500 -Pilot Memory Limit | Memory resource limit for the istio-pilot pod. | Yes | 4096 -Pilot Memory Reservation | Memory resource requests for the istio-pilot pod. | Yes | 2048 -Trace sampling Percentage | [Trace sampling percentage](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/#trace-sampling) | Yes | 1 -Pilot Selector | Ability to select the nodes in which istio-pilot pod is deployed to. To use this option, the nodes must have labels. | No | n/a - -## Mixer - -[Mixer](https://istio.io/docs/ops/deployment/architecture/#mixer) enforces access control and usage policies across the service mesh. It also integrates with plugins for monitoring tools such as Prometheus. The Envoy sidecar proxy passes telemetry data and monitoring data to Mixer, and Mixer passes the monitoring data to Prometheus. - -For more information on Mixer, policies and telemetry, refer to the [documentation](https://istio.io/docs/concepts/policies-and-telemetry/). 
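The CPU and memory options in the tables in this section ultimately surface as ordinary Kubernetes resource requests and limits on the corresponding Istio workloads. As a rough sketch, the default Mixer telemetry values shown below would translate to something like the following on the `istio-telemetry` container (illustrative, not the literal chart output):

```yaml
# Illustrative only: the default istio-telemetry reservation and limit values
# expressed as Kubernetes resource requests and limits.
resources:
  requests:
    cpu: 1000m
    memory: 1024Mi
  limits:
    cpu: 4800m
    memory: 4096Mi
```

If no node can satisfy the requests, the pod will not be scheduled; surpassing the limits can cause it to be terminated or evicted, as noted above.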
- -Option | Description| Required | Default --------|------------|-------|------- -Mixer Telemetry CPU Limit | CPU resource limit for the istio-telemetry pod.| Yes | 4800 -Mixer Telemetry CPU Reservation | CPU reservation for the istio-telemetry pod.| Yes | 1000 -Mixer Telemetry Memory Limit | Memory resource limit for the istio-telemetry pod.| Yes | 4096 -Mixer Telemetry Memory Reservation | Memory resource requests for the istio-telemetry pod.| Yes | 1024 -Enable Mixer Policy | Whether or not to deploy the istio-policy. | Yes | False -Mixer Policy CPU Limit | CPU resource limit for the istio-policy pod. | Yes, when policy enabled | 4800 -Mixer Policy CPU Reservation | CPU reservation for the istio-policy pod. | Yes, when policy enabled | 1000 -Mixer Policy Memory Limit | Memory resource limit for the istio-policy pod. | Yes, when policy enabled | 4096 -Mixer Policy Memory Reservation | Memory resource requests for the istio-policy pod. | Yes, when policy enabled | 1024 -Mixer Selector | Ability to select the nodes in which istio-policy and istio-telemetry pods are deployed to. To use this option, the nodes must have labels. | No | n/a - -## Tracing - -[Distributed tracing](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/) enables users to track a request through a service mesh. This makes it easier to troubleshoot problems with latency, parallelism and serialization. - -Option | Description| Required | Default --------|------------|-------|------- -Enable Tracing | Whether or not to deploy the istio-tracing. | Yes | True -Tracing CPU Limit | CPU resource limit for the istio-tracing pod. | Yes | 500 -Tracing CPU Reservation | CPU reservation for the istio-tracing pod. | Yes | 100 -Tracing Memory Limit | Memory resource limit for the istio-tracing pod. | Yes | 1024 -Tracing Memory Reservation | Memory resource requests for the istio-tracing pod. | Yes | 100 -Tracing Selector | Ability to select the nodes in which tracing pod is deployed to. To use this option, the nodes must have labels. | No | n/a - -## Ingress Gateway - -The Istio gateway allows Istio features such as monitoring and route rules to be applied to traffic entering the cluster. This gateway is a prerequisite for outside traffic to make requests to Istio. - -For more information, refer to the [documentation](https://istio.io/docs/tasks/traffic-management/ingress/). - -Option | Description| Required | Default --------|------------|-------|------- -Enable Ingress Gateway | Whether or not to deploy the istio-ingressgateway. | Yes | False -Service Type of Istio Ingress Gateway | How to expose the gateway. You can choose NodePort or Loadbalancer | Yes | NodePort -Http2 Port | The NodePort for http2 requests | Yes | 31380 -Https Port | The NodePort for https requests | Yes | 31390 -Load Balancer IP | Ingress Gateway Load Balancer IP | No | n/a -Load Balancer Source Ranges | Ingress Gateway Load Balancer Source Ranges | No | n/a -Ingress Gateway CPU Limit | CPU resource limit for the istio-ingressgateway pod. | Yes | 2000 -Ingress Gateway CPU Reservation | CPU reservation for the istio-ingressgateway pod. | Yes | 100 -Ingress Gateway Memory Limit | Memory resource limit for the istio-ingressgateway pod. | Yes | 1024 -Ingress Gateway Memory Reservation | Memory resource requests for the istio-ingressgateway pod. | Yes | 128 -Ingress Gateway Selector | Ability to select the nodes in which istio-ingressgateway pod is deployed to. To use this option, the nodes must have labels. 
| No | n/a - -## Prometheus - -You can query for Istio metrics using Prometheus. Prometheus is an open-source systems monitoring and alerting toolkit. - -Option | Description| Required | Default --------|------------|-------|------- -Prometheus CPU Limit | CPU resource limit for the Prometheus pod.| Yes | 1000 -Prometheus CPU Reservation | CPU reservation for the Prometheus pod.| Yes | 750 -Prometheus Memory Limit | Memory resource limit for the Prometheus pod.| Yes | 1024 -Prometheus Memory Reservation | Memory resource requests for the Prometheus pod.| Yes | 750 -Retention for Prometheus | How long your Prometheus instance retains data | Yes | 6 -Prometheus Selector | Ability to select the nodes in which Prometheus pod is deployed to. To use this option, the nodes must have labels.| No | n/a - -## Grafana - -You can visualize metrics with Grafana. Grafana lets you visualize Istio traffic data scraped by Prometheus. - -Option | Description| Required | Default --------|------------|-------|------- -Enable Grafana | Whether or not to deploy the Grafana.| Yes | True -Grafana CPU Limit | CPU resource limit for the Grafana pod.| Yes, when Grafana enabled | 200 -Grafana CPU Reservation | CPU reservation for the Grafana pod.| Yes, when Grafana enabled | 100 -Grafana Memory Limit | Memory resource limit for the Grafana pod.| Yes, when Grafana enabled | 512 -Grafana Memory Reservation | Memory resource requests for the Grafana pod.| Yes, when Grafana enabled | 100 -Grafana Selector | Ability to select the nodes in which Grafana pod is deployed to. To use this option, the nodes must have labels. | No | n/a -Enable Persistent Storage for Grafana | Enable Persistent Storage for Grafana | Yes, when Grafana enabled | False -Source | Use a Storage Class to provision a new persistent volume or Use an existing persistent volume claim | Yes, when Grafana enabled and enabled PV | Use SC -Storage Class | Storage Class for provisioning PV for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | Use the default class -Persistent Volume Size | The size for the PV you would like to provision for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | 5Gi -Existing Claim | Use existing PVC for Grafana | Yes, when Grafana enabled, enabled PV and use existing PVC | n/a diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/_index.md deleted file mode 100644 index 8ef4d73aa..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Setup Guide -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup - - /rancher/v2.x/en/istio/legacy/setup ---- - -This section describes how to enable Istio and start using it in your projects. - -This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. - -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
-
-> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management)
-
-1. [Enable Istio in the cluster.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster)
-1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace)
-1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors)
-1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads)
-1. [Set up the Istio gateway.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway)
-1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management)
-1. [Generate traffic and see Istio in action.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/#view-traffic)
-
-# Prerequisites
-
-This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.x/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning) on which you will install Istio.
-
-The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/)
-
-The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/)
diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/_index.md
deleted file mode 100644
index 78fd1b6e6..000000000
--- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/_index.md
+++ /dev/null
@@ -1,325 +0,0 @@
----
-title: 4. Add Deployments and Services with the Istio Sidecar
-weight: 4
-aliases:
- - /rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads
- - /rancher/v2.x/en/istio/legacy/setup/deploy-workloads
----
-
-> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled.
-
-Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload.
-
-To inject the Istio sidecar on an existing workload in the namespace, go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected.
-
-Wait a few minutes for the workload to be upgraded with the Istio sidecar. Then click the workload and go to the **Containers** section. You should see the `istio-init` and `istio-proxy` containers alongside your original container, which means the Istio sidecar is enabled for the workload. Istio handles all of the wiring for the Envoy sidecar, so once the sidecar is in place, you can turn on Istio features for the workload by enabling them in the YAML.
-
-### 3. Add Deployments and Services
-
-Next, we add the Kubernetes resources for the sample deployments and services of the BookInfo app from Istio's documentation.
-
-1.
Go to the project inside the cluster you want to deploy the workload on. -1. In Workloads, click **Import YAML.** -1. Copy the below resources into the form. -1. Click **Import.** - -This will set up the following sample resources from Istio's example BookInfo app: - -Details service and deployment: - -- A `details` Service -- A ServiceAccount for `bookinfo-details` -- A `details-v1` Deployment - -Ratings service and deployment: - -- A `ratings` Service -- A ServiceAccount for `bookinfo-ratings` -- A `ratings-v1` Deployment - -Reviews service and deployments (three versions): - -- A `reviews` Service -- A ServiceAccount for `bookinfo-reviews` -- A `reviews-v1` Deployment -- A `reviews-v2` Deployment -- A `reviews-v3` Deployment - -Productpage service and deployment: - -This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. - -- A `productpage` service -- A ServiceAccount for `bookinfo-productpage` -- A `productpage-v1` Deployment - -### Resource YAML - -```yaml -# Copyright 2017 Istio Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -################################################################################################## -# Details service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: details - labels: - app: details - service: details -spec: - ports: - - port: 9080 - name: http - selector: - app: details ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-details ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: details-v1 - labels: - app: details - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: details - version: v1 - template: - metadata: - labels: - app: details - version: v1 - spec: - serviceAccountName: bookinfo-details - containers: - - name: details - image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Ratings service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: ratings - labels: - app: ratings - service: ratings -spec: - ports: - - port: 9080 - name: http - selector: - app: ratings ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-ratings ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ratings-v1 - labels: - app: ratings - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: ratings - version: v1 - template: - metadata: - labels: - app: ratings - version: v1 - spec: - serviceAccountName: bookinfo-ratings - containers: - - name: ratings - image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- 
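##################################################################################################
# Note: each BookInfo component in this manifest follows the same pattern (a Service, a
# ServiceAccount, and one Deployment per version), and the "version" labels on the Deployments
# are what the DestinationRule subsets defined later in the traffic-management step use to
# split traffic.
##################################################################################################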
-################################################################################################## -# Reviews service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: reviews - labels: - app: reviews - service: reviews -spec: - ports: - - port: 9080 - name: http - selector: - app: reviews ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-reviews ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v1 - labels: - app: reviews - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v1 - template: - metadata: - labels: - app: reviews - version: v1 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v2 - labels: - app: reviews - version: v2 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v2 - template: - metadata: - labels: - app: reviews - version: v2 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v3 - labels: - app: reviews - version: v3 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v3 - template: - metadata: - labels: - app: reviews - version: v3 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Productpage services -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: productpage - labels: - app: productpage - service: productpage -spec: - ports: - - port: 9080 - name: http - selector: - app: productpage ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-productpage ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: productpage-v1 - labels: - app: productpage - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: productpage - version: v1 - template: - metadata: - labels: - app: productpage - version: v1 - spec: - serviceAccountName: bookinfo-productpage - containers: - - name: productpage - image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -``` - -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/_index.md deleted file mode 100644 index fea051bad..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 1. 
Enable Istio in the Cluster -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster - - /rancher/v2.x/en/istio/legacy/setup/enable-istio-in-cluster ---- - -This cluster uses the default Nginx controller to allow traffic into the cluster. - -A Rancher [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. - -> If the cluster has a Pod Security Policy enabled there are [prerequisites steps]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/) - -1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. -1. Click **Tools > Istio.** -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. -1. Click **Enable**. -1. Click **Save**. - -**Result:** Istio is enabled at the cluster level. - -The Istio application, `cluster-istio`, is added as an application to the cluster's `system` project. - -When Istio is enabled in the cluster, the label for Istio sidecar auto injection,`istio-injection=enabled`, will be automatically added to each new namespace in this cluster. This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. - -### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md deleted file mode 100644 index db7ba6719..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Enable Istio with Pod Security Policies -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.x/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp ---- - - >**Note:** The following guide is only for RKE provisioned clusters. - -If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. - -The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). - -- 1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) -- 2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) -- 3. [Install Istio.](#3-install-istio) - -### 1. Configure the System Project Policy to allow Istio install - -1. 
From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Find the **Project: System** project and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click Save. - - -### 2. Install the CNI Plugin in the System Project - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Select the **Project: System** project. -1. Choose **Tools > Catalogs** in the navigation bar. -1. Add a catalog with the following: - 1. Name: istio-cni - 1. Catalog URL: https://github.com/istio/cni - 1. Branch: The branch that matches your current release, for example: `release-1.4`. -1. From the main menu select **Apps** -1. Click Launch and select istio-cni -1. Update the namespace to be "kube-system" -1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: - -``` ---- - logLevel: "info" - excludeNamespaces: - - "istio-system" - - "kube-system" -``` - -### 3. Install Istio - -Follow the [primary instructions]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. - -After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/_index.md deleted file mode 100644 index b621b795f..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: 2. Enable Istio in a Namespace -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace - - /rancher/v2.x/en/istio/legacy/setup/enable-istio-in-namespace ---- - -You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. - -This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. - -> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio enabled. - -1. In the Rancher UI, go to the cluster view. Click the **Projects/Namespaces** tab. -1. Go to the namespace where you want to enable the Istio sidecar auto injection and click the **⋮.** -1. Click **Edit.** -1. In the **Istio sidecar auto injection** section, click **Enable.** -1. Click **Save.** - -**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. - -### Verifying that Automatic Istio Sidecar Injection is Enabled - -To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. - -### Excluding Workloads from Being Injected with the Istio Sidecar - -If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: - -``` -sidecar.istio.io/inject: “false” -``` - -To add the annotation to a workload, - -1. 
From the **Global** view, open the project that has the workload that should not have the sidecar. -1. Click **Resources > Workloads.** -1. Go to the workload that should not have the sidecar and click **⋮ > Edit.** -1. Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. In the **Key** field, enter `sidecar.istio.io/inject`. -1. In the **Value** field, enter `false`. -1. Click **Save.** - -**Result:** The Istio sidecar will not be injected into the workload. - -> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. - - -### [Next: Select the Nodes ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/_index.md deleted file mode 100644 index 102042bcb..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/_index.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: 5. Set up the Istio Gateway -weight: 5 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway - - /rancher/v2.x/en/istio/legacy/setup/gateway ---- - -The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. - -You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. - -To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two ingresses. - -You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. - -You can route traffic into the service mesh with a load balancer or just Istio's NodePort gateway. This section describes how to set up the NodePort gateway. - -For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) - -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - -# Enable the Istio Gateway - -The ingress gateway is a Kubernetes service that will be deployed in your cluster. There is only one Istio gateway per cluster. - -1. Go to the cluster where you want to allow outside traffic into Istio. -1. Click **Tools > Istio.** -1. Expand the **Ingress Gateway** section. -1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/) -1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. -1. 
Click **Save.** - -**Result:** The gateway is deployed, which allows Istio to receive traffic from outside the cluster. - -# Add a Kubernetes Gateway that Points to the Istio Gateway - -To allow traffic to reach Ingress, you will also need to provide a Kubernetes gateway resource in your YAML that points to Istio's implementation of the ingress gateway to the cluster. - -1. Go to the namespace where you want to deploy the Kubernetes gateway and click **Import YAML.** -1. Upload the gateway YAML as a file or paste it into the form. An example gateway YAML is provided below. -1. Click **Import.** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: bookinfo-gateway -spec: - selector: - istio: ingressgateway # use istio default controller - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "*" ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: bookinfo -spec: - hosts: - - "*" - gateways: - - bookinfo-gateway - http: - - match: - - uri: - exact: /productpage - - uri: - prefix: /static - - uri: - exact: /login - - uri: - exact: /logout - - uri: - prefix: /api/v1/products - route: - - destination: - host: productpage - port: - number: 9080 -``` - -**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. - -Confirm that the resource exists by running: -``` -kubectl get gateway -A -``` - -The result should be something like this: -``` -NAME AGE -bookinfo-gateway 64m -``` - -### Access the ProductPage Service from a Web Browser - -To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: - -`http://:/productpage` - -To get the ingress gateway URL and port, - -1. Go to the `System` project in your cluster. -1. Within the `System` project, go to `Resources` > `Workloads` then scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. -1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. - -**Result:** You should see the BookInfo app in the web browser. - -For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) - -# Troubleshooting - -The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. - -### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller - -You can try the steps in this section to make sure the Kubernetes gateway is configured properly. - -In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: - -1. Go to the `System` project in your cluster. -1. Within the `System` project, go to the namespace `istio-system`. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. -1. 
Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller.
-
-### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management)
diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/_index.md
deleted file mode 100644
index c88facca8..000000000
--- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/_index.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: 3. Select the Nodes Where Istio Components Will be Deployed
-weight: 3
-aliases:
- - /rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors
- - /rancher/v2.x/en/istio/legacy/setup/node-selectors
----
-
-> **Prerequisite:** Your cluster needs a worker node that can be designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources)
-
-This section describes how to use node selectors to configure Istio components to be deployed on a designated node.
-
-In larger deployments, it is strongly advised that Istio's infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component.
-
-# Adding a Label to the Istio Node
-
-First, add a label to the node where Istio components should be deployed. This label can have any key-value pair. For this example, we will use the key `istio` and the value `enabled`.
-
-1. From the cluster view, go to the **Nodes** tab.
-1. Go to a worker node that will host the Istio components and click **⋮ > Edit.**
-1. Expand the **Labels & Annotations** section.
-1. Click **Add Label.**
-1. In the fields that appear, enter `istio` for the key and `enabled` for the value.
-1. Click **Save.**
-
-**Result:** A worker node has the label that will allow you to designate it for Istio components.
-
-# Configuring Istio Components to Use the Labeled Node
-
-Configure each Istio component to be deployed to the node with the Istio label. Each Istio component can be configured individually, but in this tutorial, we will configure all of the components to be scheduled on the same node for the sake of simplicity.
-
-For larger deployments, it is recommended to schedule each component of Istio onto separate nodes.
-
-1. From the cluster view, click **Tools > Istio.**
-1. Expand the **Pilot** section and click **Add Selector** in the form that appears. Enter the node selector label that you added to the Istio node. In our case, we are using the key `istio` and the value `enabled`.
-1. Repeat the previous step for the **Mixer** and **Tracing** sections.
-1. Click **Save.**
-
-**Result:** The Istio components will be deployed on the Istio node.
-
-### [Next: Add Deployments and Services]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads)
\ No newline at end of file
diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/_index.md
deleted file mode 100644
index ef8c72475..000000000
--- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/_index.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: 6.
Set up Istio's Components for Traffic Management -weight: 6 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management - - /rancher/v2.x/en/istio/legacy/setup/set-up-traffic-management ---- - -A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - -- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. -- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. - -This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. - -In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. - -After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. - -To deploy the virtual service and destination rules for the `reviews` service, - -1. Go to the project view and click **Import YAML.** -1. Copy resources below into the form. -1. Click **Import.** - -``` -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: reviews -spec: - hosts: - - reviews - http: - - route: - - destination: - host: reviews - subset: v1 - weight: 50 - - destination: - host: reviews - subset: v3 - weight: 50 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: DestinationRule -metadata: - name: reviews -spec: - host: reviews - subsets: - - name: v1 - labels: - version: v1 - - name: v2 - labels: - version: v2 - - name: v3 - labels: - version: v3 -``` -**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. - -### [Next: Generate and View Traffic]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic) diff --git a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/view-traffic/_index.md b/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/view-traffic/_index.md deleted file mode 100644 index dd4e908a2..000000000 --- a/content/rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/view-traffic/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: 7. Generate and View Traffic -weight: 7 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic - - /rancher/v2.x/en/istio/legacy/setup/view-traffic ---- - -This section describes how to view the traffic that is being managed by Istio. - -# The Kiali Traffic Graph - -Rancher integrates a Kiali graph into the Rancher UI. 
The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. - -To see the traffic graph, - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. - -If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. - -For additional tools and visualizations, you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. - -# Viewing Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/_index.md b/content/rancher/v2.x/en/istio/v2.5/_index.md deleted file mode 100644 index 36dc5ed10..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/_index.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Istio in Rancher v2.5 -shortTitle: Rancher v2.5 -weight: 1 ---- - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. - -As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. - -Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to a team of developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -This core service mesh provides features that include but are not limited to the following: - -- **Traffic Management** such as ingress and egress routing, circuit breaking, mirroring. -- **Security** with resources to authenticate and authorize traffic and users, mTLS included. -- **Observability** of logs, metrics, and distributed traffic flows. - -After [setting up istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) you can leverage Istio's control plane functionality through the Cluster Explorer, `kubectl`, or `istioctl`. - -Istio needs to be set up by a `cluster-admin` before it can be used in a project. 
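For example, once a `cluster-admin` has set up Istio, you can confirm from the command line that the control plane is running and sanity-check a namespace's mesh configuration. A minimal sketch (the namespace name is a placeholder):

```
kubectl -n istio-system get pods     # istiod and any enabled gateways should be Running
istioctl analyze -n <your-namespace> # istioctl's built-in configuration analysis
```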
- -- [What's New in Rancher v2.5](#what-s-new-in-rancher-v2-5) -- [Tools Bundled with Istio](#tools-bundled-with-istio) -- [Prerequisites](#prerequisites) -- [Setup Guide](#setup-guide) -- [Remove Istio](#remove-istio) -- [Migrate from Previous Istio Version](#migrate-from-previous-istio-version) -- [Accessing Visualizations](#accessing-visualizations) -- [Architecture](#architecture) -- [Additional steps for installing Istio on an RKE2 cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) - -# What's New in Rancher v2.5 - -The overall architecture of Istio has been simplified. A single component, Istiod, has been created by combining Pilot, Citadel, Galley and the sidecar injector. Node Agent functionality has also been merged into istio-agent. - -Addons that were previously installed by Istio (cert-manager, Grafana, Jaeger, Kiali, Prometheus, Zipkin) will now need to be installed separately. Istio will support installation of integrations that are from the Istio Project and will maintain compatibility with those that are not. - -A Prometheus integration will still be available through an installation of [Rancher Monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/), or by installing your own Prometheus operator. Rancher's Istio chart will also install Kiali by default to ensure you can get a full picture of your microservices out of the box. - -Istio has migrated away from Helm as a way to install Istio and now provides installation through the istioctl binary or Istio Operator. To ensure the easiest interaction with Istio, Rancher's Istio will maintain a Helm chart that utilizes the istioctl binary to manage your Istio installation. - -This Helm chart will be available via the Apps and Marketplace in the UI. A user that has access to the Rancher Chart's catalog will need to set up Istio before it can be used in the project. - -# Tools Bundled with Istio - -Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy Helm chart, including an overlay file option to allow complex customization. - -It also includes the following: - -### Kiali - -Kiali is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them. - -You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. - -### Jaeger - -_Bundled as of v2.5.4_ - -Our Istio installer includes a quick-start, all-in-one installation of [Jaeger,](https://www.jaegertracing.io/) a tool used for tracing distributed systems. - -Note that this is not a production-qualified deployment of Jaeger. This deployment uses an in-memory storage component, while a persistent storage component is recommended for production. For more information on which deployment strategy you may need, refer to the [Jaeger documentation.](https://www.jaegertracing.io/docs/latest/operator/#production-strategy) - -# Prerequisites - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. - -If you are installing Istio on RKE2 cluster, some additional steps are required. 
For details, see [this section.](#additional-steps-for-installing-istio-on-an-rke2-cluster)
-
-# Setup Guide
-
-Refer to the [setup guide]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project.
-
-# Remove Istio
-
-To remove Istio components from a cluster, namespace, or workload, refer to the section on [uninstalling Istio.]({{}}/rancher/v2.x/en/istio/disabling-istio/)
-
-# Migrate From Previous Istio Version
-
-There is no upgrade path for Istio versions earlier than 1.7.x. To successfully install Istio in the **Cluster Explorer**, you will need to disable your existing Istio in the **Cluster Manager**.
-
-If you have a significant number of additional Istio CRDs, you might consider manually migrating CRDs that are supported in both versions of Istio. You can do this by running `kubectl get -n istio-system -o yaml`, saving the output YAML, and re-applying it in the new version.
-
-Another option is to manually uninstall Istio resources one at a time, but leave the resources that are supported in both versions of Istio and that will not be installed by the newest version. This method is more likely to result in issues installing the new version, but could be a good option depending on your situation.
-
-# Accessing Visualizations
-
-> By default, only cluster-admins have access to Kiali. For instructions on how to allow admin, edit, or view roles to access it, see [this section.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/rbac/)
-
-After Istio is set up in a cluster, Grafana, Prometheus, and Kiali are available in the Rancher UI.
-
-To access the Grafana and Prometheus visualizations, from the **Cluster Explorer** navigate to the **Monitoring** app overview page, and click on **Grafana** or **Prometheus**.
-
-To access the Kiali visualization, from the **Cluster Explorer** navigate to the **Istio** app overview page, and click on **Kiali**. From here you can access the **Traffic Graph** tab or the **Traffic Metrics** tab to see network visualizations and metrics.
-
-By default, all namespaces will be picked up by Prometheus, making data available for Kiali graphs. Refer to [selector/scrape config setup](./configuration-reference/selectors-and-scrape) if you would like to use a different configuration for Prometheus data scraping.
-
-Your access to the visualizations depends on your role. Grafana and Prometheus are only available for `cluster-admin` roles. The Kiali UI is available only to `cluster-admin` by default, but `cluster-admin` can allow other roles to access it by editing the Istio values.yaml.
-
-# Architecture
-
-Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster.
-
-Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio.
-
-When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads.
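Under the hood, per-namespace enablement is driven by the `istio-injection=enabled` label. As a sketch of what this looks like outside the UI (the namespace and deployment names are placeholders), you could label a namespace and then redeploy an existing workload so that it picks up the sidecar:

```
kubectl label namespace my-app istio-injection=enabled
kubectl -n my-app rollout restart deployment my-workload   # existing pods only get the sidecar once they are recreated
```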
-
-For more information on the Istio sidecar, refer to the [Istio sidecar-injection docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/), and for more information on Istio's architecture, refer to the [Istio Architecture docs](https://istio.io/latest/docs/ops/deployment/architecture/).
-
-### Multiple Ingresses
-
-By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. Istio also installs an ingress gateway by default into the `istio-system` namespace. The result is that your cluster will have two ingresses.
-
-![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg)
-
- Additional Istio Ingress gateways can be enabled via the [overlay file]({{}}/rancher/v2.x/en/istio/setup/enable-istio-in-cluster/#overlay-file).
-
-### Egress Support
-
-By default, the Egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file]({{}}/rancher/v2.x/en/istio/setup/enable-istio-in-cluster/#overlay-file).
-
-# Additional Steps for Installing Istio on an RKE2 Cluster
-
-To install Istio on an RKE2 cluster, follow the steps in [this section.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/)
diff --git a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/_index.md b/content/rancher/v2.x/en/istio/v2.5/configuration-reference/_index.md
deleted file mode 100644
index b44986671..000000000
--- a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/_index.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: Configuration Options
-weight: 3
----
-
-- [Egress Support](#egress-support)
-- [Enabling Automatic Sidecar Injection](#enabling-automatic-sidecar-injection)
-- [Overlay File](#overlay-file)
-- [Selectors and Scrape Configs](#selectors-and-scrape-configs)
-- [Enable Istio with Pod Security Policies](#enable-istio-with-pod-security-policies)
-- [Additional Steps for Installing Istio on an RKE2 Cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster)
-- [Additional Steps for Canal Network Plug-in with Project Network Isolation](#additional-steps-for-canal-network-plug-in-with-project-network-isolation)
-
-### Egress Support
-
-By default, the Egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file]({{}}/rancher/v2.x/en/istio/setup/enable-istio-in-cluster/#overlay-file).
-
-### Enabling Automatic Sidecar Injection
-
-Automatic sidecar injection is disabled by default. To enable it, set `sidecarInjectorWebhook.enableNamespacesByDefault=true` in the values.yaml on install or upgrade. This automatically enables Istio sidecar injection into all new namespaces that are deployed.
-
-### Overlay File
-
-An Overlay File is designed to support extensive configuration of your Istio installation. It allows you to make changes to any values available in the [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/). This ensures you can customize the default installation to fit any scenario.
-
-The Overlay File adds configuration on top of the default installation that is provided by the Istio chart installation. This means you do not need to redefine the components that are already defined for installation.
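For instance, an overlay file that only enables the egress gateway (mentioned in the Egress Support section above) while leaving everything else at the chart's defaults might look roughly like this; treat it as a sketch rather than a definitive configuration:

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    egressGateways:
      - name: istio-egressgateway
        enabled: true
```

Because the overlay is merged on top of the chart's defaults, nothing else needs to be restated.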
- -For more information on Overlay Files, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings) - -### Selectors and Scrape Configs - -The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which enables monitoring across all namespaces by default. This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. - -If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. - -For details, refer to [this section.](./selectors-and-scrape) - -### Enable Istio with Pod Security Policies - -Refer to [this section.](./enable-istio-with-psp) - -### Additional Steps for Installing Istio on an RKE2 Cluster - -Refer to [this section.](./rke2) - -### Additional Steps for Canal Network Plug-in with Project Network Isolation - -Refer to [this section.](./canal-and-project-network) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/canal-and-project-network/_index.md b/content/rancher/v2.x/en/istio/v2.5/configuration-reference/canal-and-project-network/_index.md deleted file mode 100644 index f2f87d8e9..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/canal-and-project-network/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Additional Steps for Canal Network Plug-in with Project Network Isolation -weight: 4 ---- - -In clusters where: - -- The Canal network plug-in is in use. -- The Project Network Isolation option is enabled. -- You install the Istio Ingress module - -The Istio Ingress Gateway pod won't be able to redirect ingress traffic to the workloads by default. This is because all the namespaces will be inaccessible from the namespace where Istio is installed. You have two options. - -The first option is to add a new Network Policy in each of the namespaces where you intend to have ingress controlled by Istio. Your policy should include the following lines: - -``` -- podSelector: - matchLabels: - app: istio-ingressgateway -``` - -The second option is to move the `istio-system` namespace to the `system` project, which by default is excluded from the network isolation. \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/_index.md b/content/rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/_index.md deleted file mode 100644 index 247baf1a8..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Enable Istio with Pod Security Policies -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.x/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/enable-istio-with-psp ---- - -If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. 
- -The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). - -The steps differ based on the Rancher version. - -{{% tabs %}} -{{% tab "v2.5.4+" %}} - -> **Prerequisites:** -> -> - The cluster must be an RKE Kubernetes cluster. -> - The cluster must have been created with a default PodSecurityPolicy. -> -> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. - -1. [Set the PodSecurityPolicy to unrestricted](#1-set-the-podsecuritypolicy-to-unrestricted) -2. [Enable the CNI](#2-enable-the-cni) -3. [Verify that the CNI is working.](#3-verify-that-the-cni-is-working) - -### 1. Set the PodSecurityPolicy to unrestricted - -An unrestricted PSP allows Istio to be installed. - -Set the PSP to `unrestricted` in the project where is Istio is installed, or the project where you plan to install Istio. - -1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** -1. Find the **Project: System** and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click **Save.** - -### 2. Enable the CNI - -When installing or upgrading Istio through **Apps & Marketplace,** - -1. Click **Components.** -2. Check the box next to **Enabled CNI.** -3. Finish installing or upgrading Istio. - -The CNI can also be enabled by editing the `values.yaml`: - -``` -istio_cni.enabled: true -``` - -Istio should install successfully with the CNI enabled in the cluster. - -### 3. Verify that the CNI is working - -Verify that the CNI is working by deploying a [sample application](https://istio.io/latest/docs/examples/bookinfo/) or deploying one of your own applications. - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.3" %}} - -> **Prerequisites:** -> -> - The cluster must be an RKE Kubernetes cluster. -> - The cluster must have been created with a default PodSecurityPolicy. -> -> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. - -1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) -2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) -3. [Install Istio.](#3-install-istio) - -### 1. Configure the System Project Policy to allow Istio install - -1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** -1. Find the **Project: System** and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click Save. - -### 2. Install the CNI Plugin in the System Project - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Select the **Project: System** project. -1. Choose **Tools > Catalogs** in the navigation bar. -1. Add a catalog with the following: - 1. Name: istio-cni - 1. Catalog URL: https://github.com/istio/cni - 1. Branch: The branch that matches your current release, for example: `release-1.4`. -1. From the main menu select **Apps** -1. Click Launch and select istio-cni -1. 
Update the namespace to be "kube-system" -1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: - -``` ---- - logLevel: "info" - excludeNamespaces: - - "istio-system" - - "kube-system" -``` - -### 3. Install Istio - -Follow the [primary instructions]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. - -After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. - -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/_index.md b/content/rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/_index.md deleted file mode 100644 index 7f6bab7fb..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Additional Steps for Installing Istio on an RKE2 Cluster -weight: 3 ---- - -Through the **Cluster Explorer,** when installing or upgrading Istio through **Apps & Marketplace,** - -1. Click **Components.** -1. Check the box next to **Enabled CNI.** -1. Add a custom overlay file specifying `cniBinDir` and `cniConfDir`. For more information on these options, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/additional-setup/cni/#helm-chart-parameters) An example is below: - - ```yaml - apiVersion: install.istio.io/v1alpha1 - kind: IstioOperator - spec: - components: - cni: - enabled: true - values: - cni: - image: rancher/istio-install-cni:1.7.3 - excludeNamespaces: - - istio-system - - kube-system - logLevel: info - cniBinDir: /opt/cni/bin - cniConfDir: /etc/cni/net.d - ``` -1. After installing Istio, you'll notice the cni-node pods in the istio-system namespace in a CrashLoopBackoff error. Manually edit the `istio-cni-node` daemonset to include the following on the `install-cni` container: - ```yaml - securityContext: - privileged: true - ``` - -**Result:** Now you should be able to utilize Istio as desired, including sidecar injection and monitoring via Kiali. diff --git a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape/_index.md b/content/rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape/_index.md deleted file mode 100644 index 253bcd591..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Selectors and Scrape Configs -weight: 2 ---- - -The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false`, which enables monitoring across all namespaces by default. - -This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. - -If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. 
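As a sketch, the relevant setting in the rancher-monitoring values might look like this; `true` is shown because that is the case which needs the extra configuration covered in the rest of this page:

```yaml
prometheus:
  prometheusSpec:
    # false (the Monitoring app default) monitors across all namespaces;
    # true limits Prometheus to specific namespaces, which then requires
    # the additional configuration described below.
    ignoreNamespaceSelectors: true
```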
- -- [Limiting Monitoring to Specific Namespaces by Setting ignoreNamespaceSelectors to True](#limiting-monitoring-to-specific-namespaces-by-setting-ignorenamespaceselectors-to-true) -- [Enabling Prometheus to Detect Resources in Other Namespaces](#enabling-prometheus-to-detect-resources-in-other-namespaces) -- [Monitoring Specific Namespaces: Create a Service Monitor or Pod Monitor](#monitoring-specific-namespaces-create-a-service-monitor-or-pod-monitor) -- [Monitoring Across Namespaces: Set ignoreNamespaceSelectors to False](#monitoring-across-namespaces-set-ignorenamespaceselectors-to-false) - -### Limiting Monitoring to Specific Namespaces by Setting ignoreNamespaceSelectors to True - -This limits monitoring to specific namespaces. - -1. From the **Cluster Explorer**, navigate to **Installed Apps** if Monitoring is already installed, or **Charts** in **Apps & Marketplace** -1. If starting a new install, **Click** the **rancher-monitoring** chart, then in **Chart Options** click **Edit as Yaml**. -1. If updating an existing installation, click on **Upgrade**, then in **Chart Options** click **Edit as Yaml**. -1. Set`prometheus.prometheusSpec.ignoreNamespaceSelectors=true` -1. Complete install or upgrade - -**Result:** Prometheus will be limited to specific namespaces which means one of the following configurations will need to be set up to continue to view data in various dashboards - -### Enabling Prometheus to Detect Resources in Other Namespaces - -There are two different ways to enable Prometheus to detect resources in other namespaces when `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`: - -- **Monitoring specific namespaces:** Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. -- **Monitoring across namespaces:** Add an `additionalScrapeConfig` to your rancher-monitoring instance to scrape all targets in all namespaces. - -### Monitoring Specific Namespaces: Create a Service Monitor or Pod Monitor - -This option allows you to define which specific services or pods you would like monitored in a specific namespace. - -The usability tradeoff is that you have to create the service monitor or pod monitor per namespace since you cannot monitor across namespaces. - -> **Prerequisite:** Define a ServiceMonitor or PodMonitor for ``. An example ServiceMonitor is provided below. - -1. From the **Cluster Explorer**, open the kubectl shell -1. Run `kubectl create -f .yaml` if the file is stored locally in your cluster. -1. Or run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. -1. If starting a new install, **Click** the **rancher-monitoring** chart and scroll down to **Preview Yaml**. -1. Run `kubectl label namespace istio-injection=enabled` to enable the envoy sidecar injection - -**Result:** `` can be scraped by prometheus. - -
**Example Service Monitor for Istio Proxies**
- -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: envoy-stats-monitor - namespace: istio-system - labels: - monitoring: istio-proxies -spec: - selector: - matchExpressions: - - {key: istio-prometheus-ignore, operator: DoesNotExist} - namespaceSelector: - any: true - jobLabel: envoy-stats - endpoints: - - path: /stats/prometheus - targetPort: 15090 - interval: 15s - relabelings: - - sourceLabels: [__meta_kubernetes_pod_container_port_name] - action: keep - regex: '.*-envoy-prom' - - action: labeldrop - regex: "__meta_kubernetes_pod_label_(.+)" - - sourceLabels: [__meta_kubernetes_namespace] - action: replace - targetLabel: namespace - - sourceLabels: [__meta_kubernetes_pod_name] - action: replace - targetLabel: pod_name -``` - -### Monitoring across namespaces: Set ignoreNamespaceSelectors to False - -This enables monitoring across namespaces by giving Prometheus additional scrape configurations. - -The usability tradeoff is that all of Prometheus' `additionalScrapeConfigs` are maintained in a single Secret. This could make upgrading difficult if monitoring is already deployed with additionalScrapeConfigs before installing Istio. - -1. If starting a new install, **Click** the **rancher-monitoring** chart, then in **Chart Options** click **Edit as Yaml**. -1. If updating an existing installation, click on **Upgrade**, then in **Chart Options** click **Edit as Yaml**. -1. If updating an existing installation, click on **Upgrade** and then **Preview Yaml**. -1. Set`prometheus.prometheusSpec.additionalScrapeConfigs` array to the **Additional Scrape Config** provided below. -1. Complete install or upgrade - -**Result:** All namespaces with the `istio-injection=enabled` label will be scraped by prometheus. - -
**Additional Scrape Config**
- -``` yaml -- job_name: 'istio/envoy-stats' - scrape_interval: 15s - metrics_path: /stats/prometheus - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: [__meta_kubernetes_pod_container_port_name] - action: keep - regex: '.*-envoy-prom' - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:15090 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod_name -``` diff --git a/content/rancher/v2.x/en/istio/v2.5/disabling-istio/_index.md b/content/rancher/v2.x/en/istio/v2.5/disabling-istio/_index.md deleted file mode 100644 index bcbe11712..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/disabling-istio/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Disabling Istio -weight: 4 -aliases: - - /rancher/v2.x/en/istio/disabling-istio ---- - -This section describes how to uninstall Istio in a cluster or disable a namespace, or workload. - -# Uninstall Istio in a Cluster - -To uninstall Istio, - -1. From the **Cluster Explorer,** navigate to **Installed Apps** in **Apps & Marketplace** and locate the `rancher-istio` installation. -1. Select all the apps in the `istio-system` namespace and click **Delete**. - -**Result:** The `rancher-istio` app in the cluster gets removed. The Istio sidecar cannot be deployed on any workloads in the cluster. - -**Note:** You can no longer disable and reenable your Istio installation. If you would like to save your settings for a future install, view and save individual YAMLs to refer back to / reuse for future installations. - -# Disable Istio in a Namespace - -1. From the **Cluster Explorer** view, use the side-nav to select **Namespaces** page -1. On the **Namespace** page, you will see a list of namespaces. Go to the namespace where you want to disable and click the select **Edit as Form** or **Edit as Yaml** -1. Remove the `istio-injection=enabled` label from the namespace -1. Click **Save** - -**Result:** When workloads are deployed in this namespace, they will not have the Istio sidecar. - -# Remove the Istio Sidecar from a Workload - -Disable Istio in the namespace, then redeploy the workloads with in it. They will be deployed without the Istio sidecar. diff --git a/content/rancher/v2.x/en/istio/v2.5/rbac/_index.md b/content/rancher/v2.x/en/istio/v2.5/rbac/_index.md deleted file mode 100644 index 852ac33c6..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/rbac/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Role-based Access Control -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/rbac - - /rancher/v2.x/en/istio/rbac ---- - -This section describes the permissions required to access Istio features. 
- -The rancher istio chart installs three `ClusterRoles` - -## Cluster-Admin Access - -By default, only those with the `cluster-admin` `ClusterRole` can: - -- Install istio app in a cluster -- Configure resource allocations for Istio - - -## Admin and Edit access - -By default, only Admin and Edit roles can: - -- Enable and disable Istio sidecar auto-injection for namespaces -- Add the Istio sidecar to workloads -- View the traffic metrics and traffic graph for the cluster -- Configure Istio's resources (such as the gateway, destination rules, or virtual services) - -## Summary of Default Permissions for Kubernetes Default roles - -Istio creates three `ClusterRoles` and adds Istio CRD access to the following default K8s `ClusterRole`: - -ClusterRole create by chart | Default K8s ClusterRole | Rancher Role | - ------------------------------:| ---------------------------:|---------:| - `istio-admin` | admin| Project Owner | - `istio-edit`| edit | Project Member | - `istio-view` | view | Read-only | - -Rancher will continue to use cluster-owner, cluster-member, project-owner, project-member, etc as role names, but will utilize default roles to determine access. For each default K8s `ClusterRole` there are different Istio CRD permissions and K8s actions (Create ( C ), Get ( G ), List ( L ), Watch ( W ), Update ( U ), Patch ( P ), Delete( D ), All ( * )) that can be performed. - - -|CRDs | Admin | Edit | View -|----------------------------| ------| -----| ----- -|
  • `config.istio.io`
    • `adapters`
    • `attributemanifests`
    • `handlers`
    • `httpapispecbindings`
    • `httpapispecs`
    • `instances`
    • `quotaspecbindings`
    • `quotaspecs`
    • `rules`
    • `templates`
| GLW | GLW | GLW -|
  • `networking.istio.io`
    • `destinationrules`
    • `envoyfilters`
    • `gateways`
    • `serviceentries`
    • `sidecars`
    • `virtualservices`
    • `workloadentries`
| * | * | GLW -|
  • `security.istio.io`
    • `authorizationpolicies`
    • `peerauthentications`
    • `requestauthentications`
| * | * | GLW \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/release-notes/_index.md b/content/rancher/v2.x/en/istio/v2.5/release-notes/_index.md deleted file mode 100644 index 5ca589330..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/release-notes/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Release Notes -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/release-notes - - /rancher/v2.x/en/istio/release-notes ---- - -# Istio 1.5.9 release notes - -**Bug fixes** - -* The Kiali traffic graph is now working [#28109](https://github.com/rancher/rancher/issues/28109) - -**Known Issues** - -* The Kiali traffic graph is offset in the UI [#28207](https://github.com/rancher/rancher/issues/28207) - - -# Istio 1.5.8 - -### Important note on 1.5.x versions - -When upgrading from any 1.4 version of Istio to any 1.5 version, the Rancher installer will delete several resources in order to complete the upgrade, at which point they will be immediately re-installed. This includes the `istio-reader-service-account`. If your Istio installation is using this service account be aware that any secrets tied to the service account will be deleted. Most notably this will **break specific [multi-cluster deployments](https://archive.istio.io/v1.4/docs/setup/install/multicluster/)**. Downgrades back to 1.4 are not possible. - -See the official upgrade notes for additional information on the 1.5 release and upgrading from 1.4: https://istio.io/latest/news/releases/1.5.x/announcing-1.5/upgrade-notes/ - -> **Note:** Rancher continues to use the Helm installation method, which produces a different architecture from an istioctl installation. - -### Known Issues - -* The Kiali traffic graph is currently not working [#24924](https://github.com/istio/istio/issues/24924) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/resources/_index.md b/content/rancher/v2.x/en/istio/v2.5/resources/_index.md deleted file mode 100644 index 1f82cfcaa..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/resources/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: CPU and Memory Allocations -weight: 1 -aliases: - - /rancher/v2.x/en/project-admin/istio/configuring-resource-allocations/ - - /rancher/v2.x/en/project-admin/istio/config/ - - /rancher/v2.x/en/cluster-admin/tools/istio/resources - - /rancher/v2.x/en/istio/resources ---- -_This section applies to Istio in Rancher v2.5.x. If you are using Rancher v2.4.x, refer to [this section.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/)_ - -This section describes the minimum recommended computing resources for the Istio components in a cluster. - -The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. - -> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. - -The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each core Istio component. - -In Kubernetes, the resource request indicates that the workload will not deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. 
For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) - -{{% tabs %}} -{{% tab "v2.5.6+" %}} - -| Workload | CPU - Request | Memory - Request | CPU - Limit | Memory - Limit | -|----------------------|---------------|------------|-----------------|-------------------| -| ingress gateway | 100m | 128mi | 2000m | 1024mi | -| egress gateway | 100m | 128mi | 2000m | 1024mi | -| istiod | 500m | 2048mi | No limit | No limit | -| proxy | 10m | 10mi | 2000m | 1024mi | -| **Totals:** | **710m** | **2314Mi** | **6000m** | **3072Mi** | - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.5" %}} - -Workload | CPU - Request | Memory - Request | CPU - Limit | Mem - Limit | Configurable ----------:|---------------:|---------------:|-------------:|-------------:|-------------: -Istiod | 500m | 2048Mi | No limit | No limit | Y | -Istio-Mixer | 1000m | 1000Mi | 4800m | 4000Mi | Y | -Istio-ingressgateway | 100m | 128Mi | 2000m | 1024Mi | Y | -Others | 10m | - | - | - | Y | -Totals: | 1710m | 3304Mi | >8800m | >6048Mi | - - -{{% /tab %}} -{{% /tabs %}} - - - - -# Configuring Resource Allocations - -You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. - -To make it easier to schedule the workloads to a node, a cluster-admin can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. - -You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/). - -To configure the resources allocated to an Istio component, - -1. In the Rancher **Cluster Explorer**, navigate to your Istio installation in **Apps & Marketplace** -1. Click **Upgrade** to edit the base components via changes the values.yaml or add an [overlay file]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/#overlay-file). For more information about editing the overlay file, see [this section.](./#editing-the-overlay-file) -1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. -1. Click **Upgrade.** to rollout changes - -**Result:** The resource allocations for the Istio components are updated. - - -### Editing the Overlay File - -The overlay file can contain any of the values in the [Istio Operator spec.](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec) The overlay file included with the Istio application is just one example of a potential configuration of the overlay file. - -As long as the file contains `kind: IstioOperator` and the YAML options are valid, the file can be used as an overlay. - -In the example overlay file provided with the Istio application, the following section allows you to change Kubernetes resources: - -``` -# k8s: -# resources: -# requests: -# cpu: 200m -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/_index.md deleted file mode 100644 index eb9801f19..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Setup Guide -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup ---- - -This section describes how to enable Istio and start using it in your projects. 
- -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. - -# Prerequisites - -This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.x/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning) on which you will install Istio. - -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) - -The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) - - -# Install - -> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) - -1. [Enable Istio in the cluster.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) -1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) -1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) -1. [Set up the Istio gateway. ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) -1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) -1. [Generate traffic and see Istio in action.]({{}}/rancher/v2.x/en/istio/v2.5/setup/view-traffic/ ) diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/_index.md deleted file mode 100644 index 6d1a6995b..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/_index.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -title: 3. Add Deployments and Services with the Istio Sidecar -weight: 4 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads - - /rancher/v2.x/en/istio/setup/deploy-workloads ---- - -> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have the Istio app installed. - -Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. - -To inject the Istio sidecar on an existing workload in the namespace, from the **Cluster Explorer** go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. - -Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see `istio-proxy` alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. - -### Add Deployments and Services - -There are a few ways to add new **Deployments** in your namespace - -1. From the **Cluster Explorer** click on **Workload > Overview.** -1. Click **Create.** -1. 
Select **Deployment** from the various workload options. -1. Fill out the form, or **Edit as Yaml.** -1. Click **Create.** - -Alternatively, you can select the specific workload you want to deploy from the **Workload** section of the left navigation bar and create it from there. - -To add a **Service** to your namespace - -1. From the **Cluster Explorer** click on **Service Discovery > Services** -1. Click **Create** -1. Select the type of service you want to create from the various options -1. Fill out the form, or **Edit as Yaml** -1. Click **Create** - -You can also create deployments and services using the kubectl **shell** - -1. Run `kubectl create -f .yaml` if your file is stored locally in the cluster -1. Or run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. - -### Example Deployments and Services - -Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. - -1. From the **Cluster Explorer**, open the kubectl **shell** -1. Run `cat<< EOF | kubectl apply -f -` -1. Copy the below resources into the the shell -1. Run `EOF` - -This will set up the following sample resources from Istio's example BookInfo app: - -Details service and deployment: - -- A `details` Service -- A ServiceAccount for `bookinfo-details` -- A `details-v1` Deployment - -Ratings service and deployment: - -- A `ratings` Service -- A ServiceAccount for `bookinfo-ratings` -- A `ratings-v1` Deployment - -Reviews service and deployments (three versions): - -- A `reviews` Service -- A ServiceAccount for `bookinfo-reviews` -- A `reviews-v1` Deployment -- A `reviews-v2` Deployment -- A `reviews-v3` Deployment - -Productpage service and deployment: - -This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. - -- A `productpage` service -- A ServiceAccount for `bookinfo-productpage` -- A `productpage-v1` Deployment - -### Resource YAML - -```yaml -# Copyright 2017 Istio Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -################################################################################################## -# Details service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: details - labels: - app: details - service: details -spec: - ports: - - port: 9080 - name: http - selector: - app: details ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-details ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: details-v1 - labels: - app: details - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: details - version: v1 - template: - metadata: - labels: - app: details - version: v1 - spec: - serviceAccountName: bookinfo-details - containers: - - name: details - image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Ratings service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: ratings - labels: - app: ratings - service: ratings -spec: - ports: - - port: 9080 - name: http - selector: - app: ratings ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-ratings ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ratings-v1 - labels: - app: ratings - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: ratings - version: v1 - template: - metadata: - labels: - app: ratings - version: v1 - spec: - serviceAccountName: bookinfo-ratings - containers: - - name: ratings - image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Reviews service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: reviews - labels: - app: reviews - service: reviews -spec: - ports: - - port: 9080 - name: http - selector: - app: reviews ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-reviews ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v1 - labels: - app: reviews - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v1 - template: - metadata: - labels: - app: reviews - version: v1 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v2 - labels: - app: reviews - version: v2 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v2 - template: - metadata: - labels: - app: reviews - version: v2 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v3 - labels: - app: reviews - version: v3 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v3 - template: - metadata: - labels: - app: reviews - version: v3 - spec: - serviceAccountName: 
bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Productpage services -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: productpage - labels: - app: productpage - service: productpage -spec: - ports: - - port: 9080 - name: http - selector: - app: productpage ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-productpage ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: productpage-v1 - labels: - app: productpage - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: productpage - version: v1 - template: - metadata: - labels: - app: productpage - version: v1 - spec: - serviceAccountName: bookinfo-productpage - containers: - - name: productpage - image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -``` - -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/_index.md deleted file mode 100644 index bcfa93138..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 1. Enable Istio in the Cluster -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster - - /rancher/v2.x/en/istio/setup/enable-istio-in-cluster ---- - ->**Prerequisites:** -> ->- Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. ->- If you have pod security policies, you will need to install Istio with the CNI enabled. For details, see [this section.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp) ->- To install Istio on an RKE2 cluster, additional steps are required. For details, see [this section.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/rke2/) ->- To install Istio in a cluster where the Canal network plug-in is in use and the Project Network isolation option is enabled, additional steps are required. For details, see [this section.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/canal-and-project-network) - -1. From the **Cluster Explorer**, navigate to available **Charts** in **Apps & Marketplace** -1. Select the Istio chart from the rancher provided charts -1. If you have not already installed your own monitoring app, you will be prompted to install the rancher-monitoring app. Optional: Set your Selector or Scrape config options on rancher-monitoring app install. -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. -1. Optional: Make additional configuration changes to values.yaml if needed. -1. Optional: Add additional resources or configuration via the [overlay file.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/#overlay-file) -1. Click **Install**. 
- -**Result:** Istio is installed at the cluster level. - -# Additional Config Options - -For more information on configuring Istio, refer to the [configuration reference.]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference) diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/_index.md deleted file mode 100644 index 24afaf78f..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: 2. Enable Istio in a Namespace -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace - - /rancher/v2.x/en/istio/setup/enable-istio-in-namespace ---- - -You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. - -This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. - -> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio installed. - -1. In the Rancher **Cluster Explorer,** open the kubectl shell. -1. Then run `kubectl label namespace istio-injection=enabled` - -**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. - -### Verifying that Automatic Istio Sidecar Injection is Enabled - -To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. - -### Excluding Workloads from Being Injected with the Istio Sidecar - -If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: - -``` -sidecar.istio.io/inject: “false” -``` - -To add the annotation to a workload, - -1. From the **Cluster Explorer** view, use the side-nav to select the **Overview** page for workloads. -1. Go to the workload that should not have the sidecar and edit as yaml -1. Add the following key, value `sidecar.istio.io/inject: false` as an annotation on the workload -1. Click **Save.** - -**Result:** The Istio sidecar will not be injected into the workload. - -> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. - - -### [Next: Select the Nodes ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/gateway/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/gateway/_index.md deleted file mode 100644 index 31c61d355..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/gateway/_index.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: 4. Set up the Istio Gateway -weight: 5 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway - - /rancher/v2.x/en/istio/setup/gateway ---- - -The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. 
By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. - -You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. - -To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two Ingresses. - -You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. - -You can route traffic into the service mesh with a load balancer or use Istio's NodePort gateway. This section describes how to set up the NodePort gateway. - -For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) - -![In an Istio-enabled cluster, you can have two Ingresses: the default Nginx Ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - -# Enable an Istio Gateway - -The ingress gateway is a Kubernetes service that will be deployed in your cluster. The Istio Gateway allows for more extensive customization and flexibility. - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click **Gateways** in the side nav bar. -1. Click **Create from Yaml**. -1. Paste your Istio Gateway yaml, or **Read from File**. -1. Click **Create**. - -**Result:** The gateway is deployed, and will now route traffic with applied rules - -# Example Istio Gateway - -We add the BookInfo app deployments in services when going through the Workloads example. Next we add an Istio Gateway so that the app is accessible from outside your cluster. - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click **Gateways** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the Gateway yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: bookinfo-gateway -spec: - selector: - istio: ingressgateway # use istio default controller - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "*" ---- -``` - -Then to deploy the VirtualService that provides the traffic routing for the Gateway - -1. Click **VirtualService** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the VirtualService yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: bookinfo -spec: - hosts: - - "*" - gateways: - - bookinfo-gateway - http: - - match: - - uri: - exact: /productpage - - uri: - prefix: /static - - uri: - exact: /login - - uri: - exact: /logout - - uri: - prefix: /api/v1/products - route: - - destination: - host: productpage - port: - number: 9080 -``` - -**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. 
- -Confirm that the resource exists by running: -``` -kubectl get gateway -A -``` - -The result should be something like this: -``` -NAME AGE -bookinfo-gateway 64m -``` - -### Access the ProductPage Service from a Web Browser - -To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: - -`http://:/productpage` - -To get the ingress gateway URL and port, - -1. From the **Cluster Explorer**, Click on **Workloads > Overview**. -1. Scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. -1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. - -**Result:** You should see the BookInfo app in the web browser. - -For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) - -# Troubleshooting - -The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. - -### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller - -You can try the steps in this section to make sure the Kubernetes gateway is configured properly. - -In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: - -1. From the **Cluster Explorer**, Click on **Workloads > Overview**. -1. Scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. - -### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/_index.md deleted file mode 100644 index f492fc33a..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: 5. Set up Istio's Components for Traffic Management -weight: 6 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management - - /rancher/v2.x/en/istio/setup/set-up-traffic-management ---- - -A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. 
- -- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. -- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. - -This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. - -In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. - -After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. - -To deploy the virtual service and destination rules for the `reviews` service, - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click **DestinationRule** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the DestinationRule yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: DestinationRule -metadata: - name: reviews -spec: - host: reviews - subsets: - - name: v1 - labels: - version: v1 - - name: v2 - labels: - version: v2 - - name: v3 - labels: - version: v3 -``` - -Then to deploy the VirtualService that provides the traffic routing that utilizes the DestinationRule - -1. Click **VirtualService** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the VirtualService yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: reviews -spec: - hosts: - - reviews - http: - - route: - - destination: - host: reviews - subset: v1 - weight: 50 - - destination: - host: reviews - subset: v3 - weight: 50 ---- -``` - -**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. - -### [Next: Generate and View Traffic]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic) diff --git a/content/rancher/v2.x/en/istio/v2.5/setup/view-traffic/_index.md b/content/rancher/v2.x/en/istio/v2.5/setup/view-traffic/_index.md deleted file mode 100644 index 3af6fbd12..000000000 --- a/content/rancher/v2.x/en/istio/v2.5/setup/view-traffic/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 6. Generate and View Traffic -weight: 7 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic - - /rancher/v2.x/en/istio/setup/view-traffic ---- - -This section describes how to view the traffic that is being managed by Istio. - -# The Kiali Traffic Graph - -The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you are able to view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. 
It shows you which services communicate with each other. - ->**Prerequisite:** To enable traffic to show up in the graph, ensure you have prometheus installed in the cluster. Rancher-istio installs Kiali configured by default to work with the rancher-monitoring chart. You can use rancher-monitoring or install your own monitoring solution. Optional: you can change configuration on how data scraping occurs by setting the [Selectors & Scrape Configs]({{}}/rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape) options. - -To see the traffic graph, - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click the **Kiali** link on the Istio **Overview** page. -1. Click on **Graph** in the side nav. -1. Change the namespace in the **Namespace** dropdown to view the traffic for each namespace. - -If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. - -For additional tools and visualizations, you can go to Grafana, and Prometheus dashboards from the **Monitoring** **Overview** page diff --git a/content/rancher/v2.x/en/k8s-in-rancher/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/_index.md deleted file mode 100644 index 51100243b..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Kubernetes Resources -weight: 19 -aliases: - - /rancher/v2.x/en/concepts/ - - /rancher/v2.x/en/tasks/ - - /rancher/v2.x/en/concepts/resources/ ---- - -> The Cluster Explorer is a new feature in Rancher v2.5 that allows you to view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. This section will be updated to reflect the way that Kubernetes resources are handled in Rancher v2.5. - -## Workloads - -Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. - -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. - -Following a workload deployment, you can continue working with it. You can: - -- [Upgrade]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. -- [Roll back]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. - -## Load Balancing and Ingress - -### Load Balancers - -After you launch an application, it's only available within the cluster. It can't be reached externally. - -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. 
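For context, a Layer-4 load balancer in Kubernetes is typically just a Service of type `LoadBalancer`. A minimal sketch might look like the following; the name, selector, and ports are placeholders, and the actual provisioning depends on your cloud provider or load balancer controller:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-app-lb          # placeholder name
spec:
  type: LoadBalancer
  selector:
    app: my-app            # placeholder label on the workload's pods
  ports:
    - port: 80             # port exposed on the load balancer
      targetPort: 8080     # container port of the workload
```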
- -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -#### Ingress - -Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. You can get around this issue by using an ingress. - -Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. - -For more information, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - -When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -For more information, see [Global DNS]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/). - -## Service Discovery - -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. - -For more information, see [Service Discovery]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery). - -## Pipelines - -After your project has been [configured to a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines/#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. - -For more information, see [Pipelines]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). - -## Applications - -Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. - -For more information, see [Applications in a Project]({{}}/rancher/v2.x/en/catalog/apps/). - -## Kubernetes Resources - -Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. - -Resources include: - -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. 
-- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md deleted file mode 100644 index 02d9c9163..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Encrypting HTTP Communication -description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments -weight: 3060 -aliases: - - /rancher/v2.x/en/tasks/projects/add-ssl-certificates/ - - /rancher/v2.x/en/k8s-in-rancher/certificates ---- - -When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. - -Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. - ->**Prerequisites:** You must have a TLS private key and certificate available to upload. - -1. From the **Global** view, select the project where you want to deploy your ingress. - -1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. (For Rancher before v2.3, click **Resources > Certificates.**) - -1. Enter a **Name** for the certificate. - - >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. - -1. Select the **Scope** of the certificate. - - - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. - - - **Available to a single namespace:** The certificate is only available for the deployments in one [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. - -1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Private key files end with an extension of `.key`. - -1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Certificate files end with an extension of `.crt`. - -**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. 
- -- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. -- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. -- Your certificate is added to the **Resources > Secrets > Certificates** view. (For Rancher before v2.3, it is added to **Resources > Certificates.**) - -## What's Next? - -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md deleted file mode 100644 index 062796fc0..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: ConfigMaps -weight: 3061 -aliases: - - /rancher/v2.x/en/tasks/projects/add-configmaps - - /rancher/v2.x/en/k8s-in-rancher/configmaps ---- - -While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). - -ConfigMaps accept key-value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. - ->**Note:** ConfigMaps can only be applied to namespaces and not projects. - -1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. - -1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. - -1. Enter a **Name** for the Config Map. - - >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. - -1. Select the **Namespace** you want to add the Config Map to. You can also add a new namespace on the fly by clicking **Add to a new namespace**. - -1. From **Config Map Values**, click **Add Config Map Value** to add a key-value pair to your ConfigMap. Add as many values as you need. - -1. Click **Save**. - - >**Note:** Don't use ConfigMaps to store sensitive data; [use a secret]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) instead. - > - >**Tip:** You can add multiple key-value pairs to the ConfigMap by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. - -## What's Next? - -Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for your application to consume (see the sketch after the list below), such as: - -- Application environment variables. -- Specifying parameters for a Volume mounted to the workload.
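-
-As a rough sketch only (the ConfigMap name, keys, and workload below are illustrative and not taken from this documentation), a single ConfigMap can back both patterns: keys injected as environment variables with `envFrom`, and keys mounted as files through a volume:
-
-```yml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: app-config               # example name
-  namespace: default
-data:
-  LOG_LEVEL: "info"              # consumed as an environment variable below
-  app.properties: |              # consumed as a file on a mounted volume below
-    greeting=hello
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: example-app
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: example-app
-  template:
-    metadata:
-      labels:
-        app: example-app
-    spec:
-      containers:
-        - name: example-app
-          image: rancher/hello-world
-          envFrom:
-            - configMapRef:
-                name: app-config         # exposes keys such as LOG_LEVEL as environment variables
-          volumeMounts:
-            - name: config-volume
-              mountPath: /etc/app        # app.properties appears as /etc/app/app.properties
-      volumes:
-        - name: config-volume
-          configMap:
-            name: app-config
-```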
- -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md deleted file mode 100644 index 421c2cb9f..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: The Horizontal Pod Autoscaler -description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment -weight: 3026 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. - -Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. - -You can create, manage, and delete HPAs using the Rancher UI in Rancher v2.3.0-alpha4 and higher versions. It only supports HPA in the `autoscaling/v2beta2` API. - -## Managing HPAs - -The way that you manage HPAs is different based on your version of the Kubernetes API: - -- **For Kubernetes API version autoscaling/V2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. -- **For Kubernetes API Version autoscaling/V2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. - -HPAs are also managed differently based on your version of Rancher: - -- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). -- **For Rancher Before v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). - -You might have additional HPA installation steps if you are using an older version of Rancher: - -- **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -- **For Rancher Before v2.0.7:** Clusters created in Rancher before v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). 
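-
-For reference, a minimal HPA written against the `autoscaling/v2beta2` API mentioned above might look like the following sketch. The deployment name and the 50% CPU target are placeholders, not values required by Rancher:
-
-```yml
-apiVersion: autoscaling/v2beta2
-kind: HorizontalPodAutoscaler
-metadata:
-  name: hello-world              # example name
-  namespace: default
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: hello-world            # the workload being scaled
-  minReplicas: 1
-  maxReplicas: 10
-  metrics:
-    - type: Resource
-      resource:
-        name: cpu
-        target:
-          type: Utilization
-          averageUtilization: 50 # scale up above 50% of requested CPU on average
-```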
- -## Testing HPAs with a Service Deployment - -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). - -You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md deleted file mode 100644 index 073b9bbb6..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Background Information on HPAs -weight: 3027 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section explains how HPA works with Kubernetes. - -## Why Use Horizontal Pod Autoscaler? - -Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: - -- A minimum and maximum number of pods allowed to run, as defined by the user. -- Observed CPU/memory use, as reported in resource metrics. -- Custom metrics provided by third-party metrics applications such as Prometheus, Datadog, etc. - -HPA improves your services by: - -- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. -- Increasing or decreasing performance as needed to meet service level agreements. - -## How HPA Works - -![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) - -HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: - -Flag | Default | Description | ---------|----------|----------| - `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. - `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operation. - `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. - - -For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). - -## Horizontal Pod Autoscaler API Objects - -HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`.
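-
-To make the API difference concrete, here is a rough sketch of a CPU-only HPA in the stable `autoscaling/v1` API, which expresses its target with `targetCPUUtilizationPercentage` instead of a `metrics` list. The deployment name and threshold are illustrative:
-
-```yml
-apiVersion: autoscaling/v1
-kind: HorizontalPodAutoscaler
-metadata:
-  name: hello-world                  # example name
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: hello-world                # the workload being scaled
-  minReplicas: 1
-  maxReplicas: 10
-  targetCPUUtilizationPercentage: 50 # v1 supports CPU utilization only
-```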
- -For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md deleted file mode 100644 index 989eb74cd..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Manual HPA Installation for Clusters Created Before Rancher v2.0.7 -weight: 3050 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-for-rancher-before-2_0_7 ---- - -This section describes how to manually install HPAs for clusters created with Rancher before v2.0.7. This section also describes how to configure your HPA to scale up or down, and how to assign roles to your HPA. - -Before you can use HPA in your Kubernetes cluster, you must fulfill some requirements. - -### Requirements - -Be sure that your Kubernetes cluster services are running with these flags at minimum: - -- kube-api: `requestheader-client-ca-file` -- kubelet: `read-only-port` at 10255 -- kube-controller: Optional, just needed if distinct values than default are required. - - - `horizontal-pod-autoscaler-downscale-delay: "5m0s"` - - `horizontal-pod-autoscaler-upscale-delay: "3m0s"` - - `horizontal-pod-autoscaler-sync-period: "30s"` - -For an RKE Kubernetes cluster definition, add this snippet in the `services` section. To add this snippet using the Rancher v2.0 UI, open the **Clusters** view and select **⋮ > Edit** for the cluster in which you want to use HPA. Then, from **Cluster Options**, click **Edit as YAML**. Add the following snippet to the `services` section: - -``` -services: -... - kube-api: - extra_args: - requestheader-client-ca-file: "/etc/kubernetes/ssl/kube-ca.pem" - kube-controller: - extra_args: - horizontal-pod-autoscaler-downscale-delay: "5m0s" - horizontal-pod-autoscaler-upscale-delay: "1m0s" - horizontal-pod-autoscaler-sync-period: "30s" - kubelet: - extra_args: - read-only-port: 10255 -``` - -Once the Kubernetes cluster is configured and deployed, you can deploy metrics services. - ->**Note:** `kubectl` command samples in the sections that follow were tested in a cluster running Rancher v2.0.6 and Kubernetes v1.10.1. - -### Configuring HPA to Scale Using Resource Metrics - -To create HPA resources based on resource metrics such as CPU and memory use, you need to deploy the `metrics-server` package in the `kube-system` namespace of your Kubernetes cluster. This deployment allows HPA to consume the `metrics.k8s.io` API. - ->**Prerequisite:** You must be running `kubectl` 1.8 or later. - -1. Connect to your Kubernetes cluster using `kubectl`. - -1. Clone the GitHub `metrics-server` repo: - ``` - # git clone https://github.com/kubernetes-incubator/metrics-server - ``` - -1. Install the `metrics-server` package. - ``` - # kubectl create -f metrics-server/deploy/1.8+/ - ``` - -1. Check that `metrics-server` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check the service pod for a status of `running`. Enter the following command: - ``` - # kubectl get pods -n kube-system - ``` - Then check for the status of `running`. - ``` - NAME READY STATUS RESTARTS AGE - ... 
- metrics-server-6fbfb84cdd-t2fk9 1/1 Running 0 8h - ... - ``` - 1. Check the service logs for service availability. Enter the following command: - ``` - # kubectl -n kube-system logs metrics-server-6fbfb84cdd-t2fk9 - ``` - Then review the log to confirm that the `metrics-server` package is running. - {{% accordion id="metrics-server-run-check" label="Metrics Server Log Output" %}} - I0723 08:09:56.193136 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:'' - I0723 08:09:56.193574 1 heapster.go:72] Metrics Server version v0.2.1 - I0723 08:09:56.194480 1 configs.go:61] Using Kubernetes client with master "https://10.43.0.1:443" and version - I0723 08:09:56.194501 1 configs.go:62] Using kubelet port 10255 - I0723 08:09:56.198612 1 heapster.go:128] Starting with Metric Sink - I0723 08:09:56.780114 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) - I0723 08:09:57.391518 1 heapster.go:101] Starting Heapster API server... - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] listing is available at https:///swaggerapi - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ - I0723 08:09:57.394080 1 serve.go:85] Serving securely on 0.0.0.0:443 - {{% /accordion %}} - - -1. Check that the metrics api is accessible from `kubectl`. - - - - If you are accessing the cluster through Rancher, enter your Server URL in the `kubectl` config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/metrics.k8s.io/v1beta1 - ``` - If the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/metrics.k8s.io/v1beta1 - ``` - If the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` - -### Assigning Additional Required Roles to Your HPA - -By default, HPA reads resource and custom metrics with the user `system:anonymous`. Assign `system:anonymous` to `view-resource-metrics` and `view-custom-metrics` in the ClusterRole and ClusterRoleBindings manifests. These roles are used to access metrics. - -To do it, follow these steps: - -1. Configure `kubectl` to connect to your cluster. - -1. Copy the ClusterRole and ClusterRoleBinding manifest for the type of metrics you're using for your HPA. 
- {{% accordion id="cluster-role-resource-metrics" label="Resource Metrics: ApiGroups resource.metrics.k8s.io" %}} - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-resource-metrics - rules: - - apiGroups: - - metrics.k8s.io - resources: - - pods - - nodes - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-resource-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-resource-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - {{% /accordion %}} -{{% accordion id="cluster-role-custom-resources" label="Custom Metrics: ApiGroups custom.metrics.k8s.io" %}} - - ``` - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-custom-metrics - rules: - - apiGroups: - - custom.metrics.k8s.io - resources: - - "*" - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-custom-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-custom-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - ``` -{{% /accordion %}} -1. Create them in your cluster using one of the follow commands, depending on the metrics you're using. - ``` - # kubectl create -f - # kubectl create -f - ``` diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md deleted file mode 100644 index 49d5a2821..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Managing HPAs with kubectl -weight: 3029 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl ---- - -This section describes HPA management with `kubectl`. This document has instructions for how to: - -- Create an HPA -- Get information on HPAs -- Delete an HPA -- Configure your HPAs to scale with CPU or memory utilization -- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics - -### Note For Rancher v2.3.x - -In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. - -### Note For Rancher Before v2.0.7 - -Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). 
- -##### Basic kubectl Commands for Managing HPAs - -If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: - -- Creating HPA - - - With manifest: `kubectl create -f ` - - - Without manifest (CPU only): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` - -- Getting HPA info - - - Basic: `kubectl get hpa hello-world` - - - Detailed description: `kubectl describe hpa hello-world` - -- Deleting HPA - - - `kubectl delete hpa hello-world` - -##### HPA Manifest Definition Example - -The HPA manifest is the config file used for managing an HPA with `kubectl`. - -The following snippet demonstrates the use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. - -```yml -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi -``` - - -Directive | Description ---------|----------| - `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | - `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-world` deployment. | - `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | - `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. - `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. - `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory. -
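-
-As a point of comparison only (not part of the original example), the same two scaling targets can be written against the newer `autoscaling/v2beta2` API, where each metric carries an explicit `target` block; the `scaleTargetRef` below assumes an `apps/v1` Deployment:
-
-```yml
-apiVersion: autoscaling/v2beta2
-kind: HorizontalPodAutoscaler
-metadata:
-  name: hello-world
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: hello-world
-  minReplicas: 1
-  maxReplicas: 10
-  metrics:
-    - type: Resource
-      resource:
-        name: cpu
-        target:
-          type: Utilization        # replaces targetAverageUtilization
-          averageUtilization: 50
-    - type: Resource
-      resource:
-        name: memory
-        target:
-          type: AverageValue       # replaces targetAverageValue
-          averageValue: 100Mi
-```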
- -##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) - -Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. Run the following commands to check if metrics are available in your installation: - -``` -$ kubectl top nodes -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -node-controlplane 196m 9% 1623Mi 42% -node-etcd 80m 4% 1090Mi 28% -node-worker 64m 3% 1146Mi 29% -$ kubectl -n kube-system top pods -NAME CPU(cores) MEMORY(bytes) -canal-pgldr 18m 46Mi -canal-vhkgr 20m 45Mi -canal-x5q5v 17m 37Mi -canal-xknnz 20m 37Mi -kube-dns-7588d5b5f5-298j2 0m 22Mi -kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi -metrics-server-97bc649d5-jxrlt 0m 12Mi -$ kubectl -n kube-system logs -l k8s-app=metrics-server -I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true -I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 -I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version -I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 -I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink -I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) -I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ -I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 -``` - -If you have created your cluster in Rancher v2.0.6 or before, please refer to [Manual installation](#manual-installation) - -##### Configuring HPA to Scale Using Custom Metrics with Prometheus - -You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. - -For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: - -- Prometheus is deployed in the cluster. -- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. -- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` - -Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. - -For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). - -1. Initialize Helm in your cluster. 
- ``` - # kubectl -n kube-system create serviceaccount tiller - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` - -1. Clone the `banzai-charts` repo from GitHub: - ``` - # git clone https://github.com/banzaicloud/banzai-charts - ``` - -1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. - ``` - # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system - ``` - -1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check that the service pod is `Running`. Enter the following command. - ``` - # kubectl get pods -n kube-system - ``` - From the resulting output, look for a status of `Running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h - ... - ``` - 1. Check the service logs to make sure the service is running correctly by entering the command that follows. - ``` - # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system - ``` - Then review the log output to confirm the service is running. - {{% accordion id="prometheus-logs" label="Prometheus Adaptor Logs" %}} - ... - I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds - I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: - I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT - I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json - I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 - I0724 10:18:45.696766 1 request.go:836] Response Body: 
{"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} - I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.sslip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK - I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} - I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] - I0724 10:18:51.727845 1 request.go:836] Request Body: 
{"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} - ... - {{% /accordion %}} - - - -1. Check that the metrics API is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. - {{% accordion id="custom-metrics-api-response" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","
singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. 
- {{% accordion id="custom-metrics-api-response-rancher" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/
memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md deleted file mode 100644 index f9bd8e81d..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Managing HPAs with the Rancher UI -weight: 3028 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui ---- - -_Available as of v2.3.0_ - -The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. - -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -## Creating an HPA - -1. From the **Global** view, open the project that you want to deploy a HPA to. - -1. Click **Resources > HPA.** - -1. Click **Add HPA.** - -1. Enter a **Name** for the HPA. - -1. Select a **Namespace** for the HPA. - -1. Select a **Deployment** as scale target for the HPA. - -1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. - -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -1. Click **Create** to create the HPA. - -> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. - -## Get HPA Metrics and Status - -1. 
From the **Global** view, open the project with the HPAs you want to look at. - -1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. - -1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. - - -## Deleting an HPA - -1. From the **Global** view, open the project that you want to delete an HPA from. - -1. Click **Resources > HPA.** - -1. Find the HPA which you would like to delete. - -1. Click **⋮ > Delete**. - -1. Click **Delete** to confirm. - -> **Result:** The HPA is deleted from the current cluster. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md deleted file mode 100644 index 8ec7bc9ee..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ /dev/null @@ -1,494 +0,0 @@ ---- -title: Testing HPAs with kubectl -weight: 3031 - -aliases: - - /rancher/v2.x/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa ---- - -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (Rancher v2.3.x and later), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). - -For HPA to work correctly, service deployments should have resource request definitions for containers. Follow this hello-world example to test if HPA is working correctly. - -1. Configure `kubectl` to connect to your Kubernetes cluster. - -2. Copy the `hello-world` deployment manifest below. -{{% accordion id="hello-world" label="Hello World Manifest" %}} -``` -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: hello-world - namespace: default -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world -``` -{{% /accordion %}} - -1. Deploy it to your cluster. - - ``` - # kubectl create -f - ``` - -1. 
Copy one of the HPAs below based on the metric type you're using: -{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi -``` -{{% /accordion %}} -{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m -``` -{{% /accordion %}} - -1. View the HPA info and description. Confirm that metric data is shown. - {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following commands. - ``` - # kubectl get hpa - NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE - hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 1253376 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - {{% accordion id="hpa-info-custom-metrics" label="Custom Metrics" %}} -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive the output that follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 3514368 / 100Mi - "cpu_system" on pods: 0 / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - - -1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). - -1. 
Test that pod autoscaling works as intended.

- **To Test Autoscaling Using Resource Metrics:** - {{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to two pods based on CPU Usage. - -1. View your HPA. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10928128 / 100Mi - resource cpu on pods (as a percentage of request): 56% (280m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm you've scaled to two pods. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 9424896 / 100Mi - resource cpu on pods (as a percentage of request): 66% (333m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - ``` -2. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. 
- ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-f46kh 0/1 Running 0 1m - hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10070016 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` - {{% /accordion %}} -
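While running these load tests, it can be convenient to watch the autoscaler and the pods update in real time instead of repeatedly running `kubectl describe hpa`; for example:

```
# Watch the HPA's current/target metrics and replica count as the load changes.
kubectl get hpa hello-world --watch

# In a second terminal, watch pods being added and removed.
kubectl get pods --watch
```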
-**To Test Autoscaling Using Custom Metrics:** - {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale two pods based on CPU usage. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8159232 / 100Mi - "cpu_system" on pods: 7m / 20m - resource cpu on pods (as a percentage of request): 64% (321m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm two pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` - {{% /accordion %}} -{{% accordion id="observe-upscale-3-pods-cpu-cooldown-2" label="Upscale to 3 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows: - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - ``` -1. Enter the following command to confirm three pods are running. 
- ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - # kubectl get pods - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="observe-upscale-4-pods" label="Upscale to 4 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm four pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m - hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive similar output to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8101888 / 100Mi - "cpu_system" on pods: 8m / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` -1. Enter the following command to confirm a single pods is running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md deleted file mode 100644 index c4bf31383..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Set Up Load Balancer and Ingress Controller within Rancher -description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers -weight: 3040 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress ---- - -Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. - -## Load Balancers - -After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. - -If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -### Load Balancer Limitations - -Load Balancers have a couple of limitations you should be aware of: - -- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. 
Running multiple load balancers can be expensive. - -- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - - - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) - - - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) - -## Ingress - -As mentioned in the limitations above, the disadvantages of using a load balancer are: - -- Load Balancers can only handle one IP address per service. -- If you run multiple services in your cluster, you must have a load balancer for each service. -- It can be expensive to have a load balancer for every service. - -In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. - -Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. - -Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. - -Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. - -Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). - -Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. - ->**Using Rancher in a High Availability Configuration?** -> ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. - -- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). -- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry; see [Global DNS]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/).
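To make the `server{}` analogy above concrete, here is a minimal Ingress manifest of the kind an ingress controller consumes. It is only a sketch: the hostname, service name, and port are illustrative, and on newer Kubernetes versions the `apiVersion` and backend fields differ (`networking.k8s.io/v1` uses `service.name`/`service.port`).

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: hello-world
  namespace: default
spec:
  rules:
  - host: www.mysite.com            # requests for this hostname...
    http:
      paths:
      - path: /                     # ...and this path...
        backend:
          serviceName: hello-world  # ...are routed to this service
          servicePort: 80
```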
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md deleted file mode 100644 index e81635886..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Adding Ingresses to Your Project -description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project -weight: 3042 -aliases: - - /rancher/v2.x/en/tasks/workloads/add-ingress/ - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress ---- - -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/). - -1. From the **Global** view, open the project that you want to add ingress to. -1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions before v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. -1. Enter a **Name** for the ingress. -1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) on the fly by clicking **Add to a new namespace**. -1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. -1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. - -**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. - - -# Ingress Rule Configuration - -- [Automatically generate a sslip.io hostname](#automatically-generate-a-sslip-io-hostname) -- [Specify a hostname to use](#specify-a-hostname-to-use) -- [Use as the default backend](#use-as-the-default-backend) -- [Certificates](#certificates) -- [Labels and Annotations](#labels-and-annotations) - -### Automatically generate a sslip.io hostname - -If you choose this option, the ingress routes requests for a hostname to a DNS name that's automatically generated. Rancher uses [sslip.io](http://sslip.io/) to automatically generate the DNS name. This option is best used for testing, _not_ production environments. - ->**Note:** To use this option, you must be able to resolve to `sslip.io` addresses. - -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want to specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on.
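As a quick way to confirm that your environment can resolve `sslip.io` addresses, you can query one directly; the hostname below is illustrative:

```
# Any sslip.io hostname that embeds an IP address resolves to that IP.
nslookup hello-world.default.203.0.113.10.sslip.io
# Expected answer: 203.0.113.10
```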
- -### Specify a hostname to use - -If you use this option, ingress routes requests for a hostname to the service or workload that you specify. - -1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on. - -### Use as the default backend - -Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. - ->**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. - -1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. -1. Select a service or workload from the **Target** drop-down list. - -### Certificates ->**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/). - -1. Click **Add Certificate**. -1. Select a **Certificate** from the drop-down list. -1. Enter the **Host** using encrypted communication. -1. To add additional hosts that use the certificate, click **Add Hosts**. - -### Labels and Annotations - -Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. - -For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). \ No newline at end of file diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md deleted file mode 100644 index dd380e539..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Layer 4 and Layer 7 Load Balancing" -description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" -weight: 3041 -aliases: - - /rancher/v2.x/en/concepts/load-balancing/ - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers ---- -Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. - -## Layer-4 Load Balancer - -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. 
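In Kubernetes terms, a Layer-4 load balancer is requested by creating a Service of type `LoadBalancer`; the sketch below uses illustrative names and ports, and an external address is only provisioned on providers that support this service type (see the support table that follows).

```
apiVersion: v1
kind: Service
metadata:
  name: hello-world-lb
  namespace: default
spec:
  type: LoadBalancer      # provisioned by the cloud provider, if supported
  selector:
    app: hello-world      # illustrative pod label
  ports:
  - port: 80              # port exposed by the load balancer
    targetPort: 8080      # container port the traffic is forwarded to
    protocol: TCP
```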
- -Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. - -> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. - -### Support for Layer-4 Load Balancing - -Support for layer-4 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-4 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Limited NGINX or third-party Ingress* -RKE on vSphere | Limited NGINX or third party-Ingress* -RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* -Third-party MetalLB | Limited NGINX or third-party Ingress* - -\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) - -## Layer-7 Load Balancer - -Layer-7 load balancer (or the ingress controller) supports host and path-based load balancing and SSL termination. Layer-7 load balancer only forwards HTTP and HTTPS traffic and therefore it listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancer. In addition, RKE clusters deploy the Nginx Ingress Controller. - -### Support for Layer-7 Load Balancing - -Support for layer-7 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-7 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GKE cloud provider -Azure AKS | Not Supported -RKE on EC2 | Nginx Ingress Controller -RKE on DigitalOcean | Nginx Ingress Controller -RKE on vSphere | Nginx Ingress Controller -RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller - -### Host Names in Layer-7 Load Balancer - -Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. - -Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: - -1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposes by the Layer-7 load balancer. -2. Ask Rancher to generate an sslip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name ..a.b.c.d.sslip.io. - -The benefit of using sslip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. - -## Related Links - -- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md deleted file mode 100644 index 2a5319421..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Kubernetes Registry and Docker Registry -description: Learn about the Docker registry and Kubernetes registry, their use cases and how to use a private registry with the Rancher UI -weight: 3063 -aliases: - - /rancher/v2.x/en/tasks/projects/add-registries/ - - /rancher/v2.x/en/k8s-in-rancher/registries - - /rancher/v2.x/en/k8s-resources/k8s-in-rancher/registries ---- -Registries are Kubernetes secrets containing credentials used to authenticate with [private Docker registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). - -The word "registry" can mean two things, depending on whether it is used to refer to a Docker or Kubernetes registry: - -- A **Docker registry** contains Docker images that you can pull in order to use them in your deployment. The registry is a stateless, scalable server side application that stores and lets you distribute Docker images. -- The **Kubernetes registry** is an image pull secret that your deployment uses to authenticate with a Docker registry. - -Deployments use the Kubernetes registry secret to authenticate with a private Docker registry and then pull a Docker image hosted on it. - -Currently, deployments pull the private registry credentials automatically only if the workload is created in the Rancher UI and not when it is created via kubectl. - -# Creating a Registry - ->**Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a registry. - -1. From the main menu, click **Resources > Secrets > Registry Credentials.** (For Rancher before v2.3, click **Resources > Registries.)** - -1. Click **Add Registry.** - -1. Enter a **Name** for the registry. 
- - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your registry must have a unique name among all secrets within your workspace. - -1. Select a **Scope** for the registry. You can either make the registry available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). - -1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password. - -1. Click **Save**. - -**Result:** - -- Your secret is added to the project or namespace, depending on the scope you chose. -- You can view the secret in the Rancher UI from the **Resources > Registries** view. -- Any workload that you create in the Rancher UI will have the credentials to access the registry if the workload is within the registry's scope. - -# Using a Private Registry - -You can deploy a workload with an image from a private registry through the Rancher UI, or with `kubectl`. - -### Using the Private Registry with the Rancher UI - -To deploy a workload with an image from your private registry, - -1. Go to the project view, -1. Click **Resources > Workloads.** In versions before v2.3.0, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a unique name for the workload and choose a namespace. -1. In the **Docker Image** field, enter the URL of the path to the Docker image in your private registry. For example, if your private registry is on Quay.io, you could use `quay.io//`. -1. Click **Launch.** - -**Result:** Your deployment should launch, authenticate using the private registry credentials you added in the Rancher UI, and pull the Docker image that you specified. - -### Using the Private Registry with kubectl - -When you create the workload using `kubectl`, you need to configure the pod so that its YAML has the path to the image in the private registry. You also have to create and reference the registry secret because the pod only automatically gets access to the private registry credentials if it is created in the Rancher UI. - -The secret has to be created in the same namespace where the workload gets deployed. - -Below is an example `pod.yml` for a workload that uses an image from a private registry. In this example, the pod uses an image from Quay.io, and the .yml specifies the path to the image. The pod authenticates with the registry using credentials stored in a Kubernetes secret called `testquay`, which is specified in `spec.imagePullSecrets` in the `name` field: - -``` -apiVersion: v1 -kind: Pod -metadata: - name: private-reg -spec: - containers: - - name: private-reg-container - image: quay.io// - imagePullSecrets: - - name: testquay -``` - -In this example, the secret named `testquay` is in the default namespace. - -You can use `kubectl` to create the secret with the private registry credentials. 
This command creates the secret named `testquay`: - -``` -kubectl create secret docker-registry testquay \ - --docker-server=quay.io \ - --docker-username= \ - --docker-password= -``` - -To see how the secret is stored in Kubernetes, you can use this command: - -``` -kubectl get secret testquay --output="jsonpath={.data.\.dockerconfigjson}" | base64 --decode -``` - -The result looks like this: - -``` -{"auths":{"quay.io":{"username":"","password":"","auth":"c291bXlhbGo6dGVzdGFiYzEyMw=="}}} -``` - -After the workload is deployed, you can check if the image was pulled successfully: - -``` -kubectl get events -``` -The result should look like this: -``` -14s Normal Scheduled Pod Successfully assigned default/private-reg2 to minikube -11s Normal Pulling Pod pulling image "quay.io//" -10s Normal Pulled Pod Successfully pulled image "quay.io//" -``` - -For more information, refer to the Kubernetes documentation on [creating a pod that uses your secret.](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md deleted file mode 100644 index e1cfb93e2..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Secrets -weight: 3062 -aliases: - - /rancher/v2.x/en/tasks/projects/add-a-secret - - /rancher/v2.x/en/k8s-in-rancher/secrets ---- - -[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. - -> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.x/en/k8s-in-rancher/registries) - -When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# Creating Secrets - -When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. - -2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. - -3. Enter a **Name** for the secret. - - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. - -4. Select a **Scope** for the secret. You can either make the registry available for the entire project or a single [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces). - -5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. - - >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. 
- > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -1. Click **Save**. - -**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# What's Next? - -Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. - -For more information on adding secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md deleted file mode 100644 index 87dd3c842..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Service Discovery -weight: 3045 -aliases: - - /rancher/v2.x/en/tasks/workloads/add-a-dns-record/ - - /rancher/v2.x/en/k8s-in-rancher/service-discovery ---- - -For every workload created, a complementing Service Discovery entry is created. This Service Discovery entry enables DNS resolution for the workload's pods using the following naming convention: -`..svc.cluster.local`. - -However, you also have the option of creating additional Service Discovery records. You can use these additional records so that a given [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) resolves with one or more external IP addresses, an external hostname, an alias to another DNS record, other workloads, or a set of pods that match a selector that you create. - -1. From the **Global** view, open the project that you want to add a DNS record to. - -1. Click **Resources** in the main navigation bar. Click the **Service Discovery** tab. (In versions before v2.3.0, just click the **Service Discovery** tab.) Then click **Add Record**. - -1. Enter a **Name** for the DNS record. This name is used for DNS resolution. - -1. Select a **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. - -1. Select one of the **Resolves To** options to route requests to the DNS record. - - 1. **One or more external IP addresses** - - Enter an IP address in the **Target IP Addresses** field. Add more IP addresses by clicking **Add Target IP**. - - 1. **An external hostname** - - Enter a **Target Hostname**. - - 1. **Alias of another DNS record's value** - - Click **Add Target Record** and select another DNS record from the **Value** drop-down. - - 1. **One or more workloads** - - Click **Add Target Workload** and select another workload from the **Value** drop-down. - - 1. **The set of pods which match a selector** - - Enter key value pairs of [label selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) to create a record for all pods that match your parameters. - -1. Click **Create** - -**Result:** A new DNS record is created. - -- You can view the record by from the project's **Service Discovery** tab. -- When you visit the new DNS name for the new record that you created (`..svc.cluster.local`), it resolves the chosen namespace. 
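These Service Discovery records correspond to Kubernetes Service objects under the hood. As a rough, illustrative sketch, a record created with the "An external hostname" option behaves like an `ExternalName` Service such as the following (the record name, namespace, and target hostname are placeholders):

```
apiVersion: v1
kind: Service
metadata:
  # Resolvable inside the cluster as my-record.<namespace>.svc.cluster.local
  name: my-record
  namespace: default
spec:
  type: ExternalName
  # The external hostname that the record resolves to
  externalName: example.mycompany.com
```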
- -## Related Links - -- [Adding entries to Pod /etc/hosts with HostAliases](https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md deleted file mode 100644 index 0a0279be9..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "Kubernetes Workloads and Pods" -description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" -weight: 3025 -aliases: - - /rancher/v2.x/en/concepts/workloads/ - - /rancher/v2.x/en/tasks/workloads/ - - /rancher/v2.x/en/k8s-in-rancher/workloads ---- - -You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. - -### Pods - -[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore, when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. - -### Workloads - -_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. -Workloads let you define the rules for application scheduling, scaling, and upgrade. - -#### Workload Types - -Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: - -- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - - _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server. - -- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - - _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An application would be something like Zookeeper—an application that requires a database for storage. - -- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - - _DaemonSets_ ensure that every node in the cluster runs a copy of the pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best. - -- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) - - _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state. - -- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) - - _CronJobs_ are similar to jobs. CronJobs, however, run to completion on a cron-based schedule.
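For reference, a minimal Deployment manifest for a stateless workload, such as the Nginx example mentioned above, might look like the following; the names, labels, and image tag are illustrative:

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: default
spec:
  replicas: 2                # number of pod instances to maintain
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17    # illustrative image tag
        ports:
        - containerPort: 80
```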
- -### Services - -In many use cases, a workload has to be either: - -- Accessed by other workloads in the cluster. -- Exposed to the outside world. - -You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. - -#### Service Types - -There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). - -- **ClusterIP** - - >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. - -- **NodePort** - - >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `:`. - -- **LoadBalancer** - - >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. - -## Workload Options - -This section of the documentation contains instructions for deploying workloads and using workload options. - -- [Deploy Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/) - -## Related Links - -### External Links - -- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md deleted file mode 100644 index 75f408059..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Adding a Sidecar -weight: 3029 -aliases: - - /rancher/v2.x/en/tasks/workloads/add-a-sidecar/ - - /rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar ---- -A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. - -1. From the **Global** view, open the project running the workload you want to add a sidecar to. - -1. Click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. - -1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. - -1. Enter a **Name** for the sidecar. - -1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. - - - **Standard Container:** - - The sidecar container is deployed after the main container. - - - **Init Container:** - - The sidecar container is deployed before the main container. - -1. 
From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. - -1. Set the remaining options. You can read about them in [Deploying Workloads](../deploy-workloads). - -1. Click **Launch**. - -**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. - -## Related Links - -- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md deleted file mode 100644 index 416442519..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Deploying Workloads -description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. -weight: 3026 -aliases: - - /rancher/v2.x/en/tasks/workloads/deploy-workloads/ - - /rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads ---- - -Deploy a workload to run an application in one or more containers. - -1. From the **Global** view, open the project that you want to deploy a workload to. - -1. Click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. - -1. Enter a **Name** for the workload. - -1. Select a [workload type]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. - -1. Either select an existing [namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. - -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster. For more information, see [Services]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/#services). - -1. Configure the remaining options: - - - **Environment Variables** - - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). - - - **Node Scheduling** - - **Health Check** - - **Volumes** - - Use this section to add storage for your workload.
You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/). - - When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. - - - **Scaling/Upgrade Policy** - - >**Amazon Note for Volumes:** - > - > To mount an Amazon EBS volume: - > - >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. - > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes). - - -1. Click **Show Advanced Options** and configure: - - - **Command** - - **Networking** - - **Labels & Annotations** - - **Security and Host Config** - -1. Click **Launch**. - -**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md deleted file mode 100644 index 7a13ca4ca..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Rolling Back Workloads -weight: 3027 -aliases: - - /rancher/v2.x/en/tasks/workloads/rollback-workloads/ - - /rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads ---- - -Sometimes there is a need to rollback to the previous version of the application, either for debugging purposes or because an upgrade did not go as planned. - -1. From the **Global** view, open the project running the workload you want to rollback. - -1. Find the workload that you want to rollback and select **Vertical ⋮ (... ) > Rollback**. - -1. Choose the revision that you want to roll back to. Click **Rollback**. - -**Result:** Your workload reverts to the previous version that you chose. Wait a few minutes for the action to complete. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md deleted file mode 100644 index 638cd0ccd..000000000 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Upgrading Workloads -weight: 3028 -aliases: - - /rancher/v2.x/en/tasks/workloads/upgrade-workloads/ - - /rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads ---- -When a new version of an application image is released on Docker Hub, you can upgrade any workloads running a previous version of the application to the new one. - -1. From the **Global** view, open the project running the workload you want to upgrade. - -1. Find the workload that you want to upgrade and select **Vertical ⋮ (... ) > Edit**. - -1. Update the **Docker Image** to the updated version of the application image on Docker Hub. - -1. 
Update any other options that you want to change. - -1. Review and edit the workload's **Scaling/Upgrade** policy. - - These options control how the upgrade rolls out to containers that are currently running. For example, for scalable deployments, you can choose whether you want to stop old pods before deploying new ones, or vice versa, as well as the upgrade batch size. - -1. Click **Upgrade**. - -**Result:** The workload begins upgrading its containers, per your specifications. Note that scaling up the deployment or updating the upgrade/scaling policy won't result in the pods recreation. diff --git a/content/rancher/v2.x/en/logging/_index.md b/content/rancher/v2.x/en/logging/_index.md deleted file mode 100644 index 61e901e7c..000000000 --- a/content/rancher/v2.x/en/logging/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Rancher Integration with Logging Services -shortTitle: Logging -description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. -metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." -weight: 16 -aliases: - - /rancher/v2.x/en/dashboard/logging ---- - - -If you are using Rancher v2.5, refer to [this section.](./v2.5) - -If you are using Rancher v2.0-v2.4, refer to [this section.](./v2.0.x-v2.4.x) \ No newline at end of file diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/_index.md deleted file mode 100644 index 33c8ded53..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Logging in Rancher v2.0-v2.4 -shortTitle: Rancher v2.0-v2.4 -weight: 2 ---- - - -This section contains documentation for the logging features that were available in Rancher before v2.5. - -- [Cluster logging](./cluster-logging) -- [Project logging](./project-logging) \ No newline at end of file diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/_index.md deleted file mode 100644 index d2bd9b443..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/_index.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Cluster Logging in Rancher v2.0.x-v2.4.x -shortTitle: Cluster Logging -description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. -metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." -weight: 3 -aliases: - - /rancher/v2.x/en/tasks/logging/ - - /rancher/v2.x/en/cluster-admin/tools/logging - - /rancher/v2.x/en/logging/legacy/cluster-logging ---- - -> In Rancher 2.5, the logging application was improved. There are now two ways to enable logging. 
The older way is documented in this section, and the new application for logging is documented [here.]({{}}/rancher/v2.x/en/logging/v2.5) - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher supports integration with the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - -This section covers the following topics: - -- [How logging integrations work](#how-logging-integrations-work) -- [Requirements](#requirements) -- [Logging scope](#logging-scope) -- [Enabling cluster logging](#enabling-cluster-logging) - -# How Logging Integrations Work - -Rancher can integrate with popular external services used for event streams, telemetry, or search. These services can log errors and warnings in your Kubernetes infrastructure to a stream. - -These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. - -When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. - -Additionally, you'll have the opportunity to enter key-value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key-value pairs. - ->**Note:** You can only configure one logging service per cluster or per project. - -# Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -# Logging Scope - -You can configure logging at either cluster level or project level. - -- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. -- [Project logging]({{}}/rancher/v2.x/en/project-admin/tools/logging/) writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - Kubernetes system components logs stored at `/var/lib/rancher/rke/log/`. - -# Enabling Cluster Logging - -As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. - -1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. - -1. Select **Tools > Logging** in the navigation bar. - -1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. 
Rancher supports integration with the following services: - - - [Elasticsearch]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) - -1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. - - - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. - - - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) - - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) - - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) - - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) - - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) - - - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. - 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. 
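For reference, the raw configuration entered through **Edit as File** follows the syntax of the Fluentd output plugin for your target. The sketch below shows what an Elasticsearch output can look like, using parameters from the fluent-plugin-elasticsearch documentation linked above; the host, credentials, and index name are placeholders, and depending on your Rancher version the editor may expect only the plugin body without the `<match>` wrapper:

```
<match **>
  @type elasticsearch
  host elasticsearch.example.com
  port 9200
  scheme https
  user elastic
  password YOUR_PASSWORD
  index_name rancher-logs
</match>
```

After entering a custom configuration, use **Dry Run** to have Rancher validate it before saving.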
- -## Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/elasticsearch/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/elasticsearch/_index.md deleted file mode 100644 index 5c73295a1..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/elasticsearch/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Elasticsearch -weight: 200 -aliases: - - /rancher/v2.x/en/tools/logging/elasticsearch/ - - /rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch - - /rancher/v2.x/en/logging/legacy/cluster-logging/elasticsearch ---- - -If your organization uses [Elasticsearch](https://www.elastic.co/), either on premise or in the cloud, you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Elasticsearch deployment to view logs. - ->**Prerequisites:** Configure an [Elasticsearch deployment](https://www.elastic.co/guide/en/cloud/saas-release/ec-create-deployment.html). - -## Elasticsearch Deployment Configuration - -1. In the **Endpoint** field, enter the IP address and port of your Elasticsearch instance. You can find this information from the dashboard of your Elasticsearch deployment. - - * Elasticsearch usually uses port `9200` for HTTP and `9243` for HTTPS. - -1. If you are using [X-Pack Security](https://www.elastic.co/guide/en/x-pack/current/xpack-introduction.html), enter your Elasticsearch **Username** and **Password** for authentication. - -1. Enter an [Index Pattern](https://www.elastic.co/guide/en/kibana/current/index-patterns.html). - -## SSL Configuration - -If your instance of Elasticsearch uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Enter your **Client Key Password**. - -1. Enter your **SSL Version**. The default version is `TLSv1_2`. - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. 
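Before entering the endpoint in Rancher, you can confirm that it is reachable and that your credentials work. A quick check with curl, using a placeholder host and placeholder credentials, looks like this:

```
curl -u elastic:YOUR_PASSWORD "https://elasticsearch.example.com:9243/_cluster/health?pretty"
```

A healthy deployment returns a JSON document whose `status` field is `green` or `yellow`. If the request fails, fix connectivity or authentication before configuring the logging integration.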
diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd/_index.md deleted file mode 100644 index a949f8628..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/fluentd/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Fluentd -weight: 600 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/logging/fluentd - - /rancher/v2.x/en/logging/legacy/cluster-logging/fluentd ---- - -If your organization uses [Fluentd](https://www.fluentd.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Fluentd server to view logs. - ->**Prerequisites:** Configure Fluentd input forward to receive the event stream. -> ->See [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/in_forward) for details. - -## Fluentd Configuration - -You can add multiple Fluentd Servers. If you want to add additional Fluentd servers, click **Add Fluentd Server**. For each Fluentd server, complete the configuration information: - -1. In the **Endpoint** field, enter the address and port of your Fluentd instance, e.g. `http://Fluentd-server:24224`. - -1. Enter the **Shared Key** if your Fluentd Server is using a shared key for authentication. - -1. Enter the **Username** and **Password** if your Fluentd Server is using username and password for authentication. - -1. **Optional:** Enter the **Hostname** of the Fluentd server. - -1. Enter the load balancing **Weight** of the Fluentd server. If the weight of one server is 20 and the other server is 30, events will be sent in a 2:3 ratio. If you do not enter a weight, the default weight is 60. - -1. If this server is a standby server, check **Use as Standby Only**. Standby servers are used when all other servers are not available. - -After adding all the Fluentd servers, you have the option to select **Enable Gzip Compression**. By default, this is enabled because the transferred payload size will be reduced. - -## SSL Configuration - -If your Fluentd servers are using TLS, you need to select **Use TLS**. If you are using a self-signed certificate, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - ->**Note:** Fluentd does not support self-signed certificates when client authentication is enabled. diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/kafka/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/kafka/_index.md deleted file mode 100644 index 272edef39..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/kafka/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Kafka -weight: 400 -aliases: - - /rancher/v2.x/en/tools/logging/kafka/ - - /rancher/v2.x/en/cluster-admin/tools/logging/kafka - - /rancher/v2.x/en/logging/legacy/cluster-logging/kafka ---- - -If your organization uses [Kafka](https://kafka.apache.org/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Kafka server to view logs. - ->**Prerequisite:** You must have a Kafka server configured. - -## Kafka Server Configuration - -1. Select the type of **Endpoint** your Kafka server is using: - - * **Zookeeper**: Enter the IP address and port. By default, Zookeeper uses port `2181`. Please note that a Zookeeper endpoint cannot enable TLS. - * **Broker**: Click on **Add Endpoint**. For each Kafka broker, enter the IP address and port. 
By default, Kafka brokers use port `9092`. - -1. In the **Topic** field, enter the name of a Kafka [topic](https://kafka.apache.org/documentation/#basic_ops_add_topic) that your Kubernetes cluster submits logs to. - -## **Broker** Endpoint Type - -### SSL Configuration - -If your Kafka cluster is using SSL for the **Broker**, you need to complete the **SSL Configuration** form. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - -1. Provide the **CA Certificate PEM**. You can either copy and paste the certificate or upload it using the **Read from a file** button. - ->**Note:** Kafka does not support self-signed certificates when client authentication is enabled. - -### SASL configuration - -If your Kafka cluster is using [SASL authentication](https://kafka.apache.org/documentation/#security_sasl) for the Broker, you need to complete the **SASL Configuration** form. - -1. Enter the SASL **Username** and **Password**. - -1. Select the **SASL Type** that your Kafka cluster is using. - - * If your Kafka is using **Plain**, please ensure your Kafka cluster is using SSL. - - * If your Kafka is using **Scram**, you need to select which **Scram Mechanism** Kafka is using. diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/_index.md deleted file mode 100644 index f2b336d1f..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Splunk -weight: 300 -aliases: - - /rancher/v2.x/en/tasks/logging/splunk/ - - /rancher/v2.x/en/tools/logging/splunk/ - - /rancher/v2.x/en/cluster-admin/tools/logging/splunk - - /rancher/v2.x/en/logging/legacy/cluster-logging/splunk ---- - -If your organization uses [Splunk](https://www.splunk.com/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Splunk server to view logs. - ->**Prerequisites:** -> ->- Configure HTTP event collection for your Splunk Server (Splunk Enterprise or Splunk Cloud). ->- Either create a new token or copy an existing token. -> ->For more information, see [Splunk Documentation](http://docs.splunk.com/Documentation/Splunk/7.1.2/Data/UsetheHTTPEventCollector#About_Event_Collector_tokens). - -## Splunk Configuration - -1. In the **Endpoint** field, enter the IP address and port for you Splunk instance (i.e. `http://splunk-server:8088`) - - * Splunk usually uses port `8088`. If you're using Splunk Cloud, you'll need to work with [Splunk support](https://www.splunk.com/en_us/support-and-services.html) to get an endpoint URL. - -1. Enter the **Token** you obtained while completing the prerequisites (i.e., when you created a token in Splunk). - -1. In the **Source** field, enter the name of the token as entered in Splunk. - -1. **Optional:** Provide one or more [index](http://docs.splunk.com/Documentation/Splunk/7.1.2/Indexer/Aboutindexesandindexers) that's allowed for your token. - -## SSL Configuration - -If your instance of Splunk uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. 
- - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Enter your **Client Key Password**. - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. - -## Viewing Logs - -1. Log into your Splunk server. - -1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. - -1. Click on Data Summary and select the Sources tab. - ![View Logs]({{}}/img/rancher/splunk/splunk4.jpg) - -1. To view the actual logs, click on the source that you declared earlier. - ![View Logs]({{}}/img/rancher/splunk/splunk5.jpg) - -## Troubleshooting - -You can use curl to see if **HEC** is listening for HTTP event data. - -``` -$ curl http://splunk-server:8088/services/collector/event \ - -H 'Authorization: Splunk 8da70994-b1b0-4a79-b154-bfaae8f93432' \ - -d '{"event": "hello world"}' -``` - -If Splunk is configured correctly, you should receive **json** data returning `success code 0`. You should be able -to send logging data to HEC. - -If you received an error, check your configuration in Splunk and Rancher. diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/syslog/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/syslog/_index.md deleted file mode 100644 index 3311547c1..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/syslog/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Syslog -weight: 500 -aliases: - - /rancher/v2.x/en/tools/logging/syslog/ - - /rancher/v2.x/en/cluster-admin/tools/logging/syslog - - /rancher/v2.x/en/logging/legacy/cluster-logging/syslog ---- - -If your organization uses [Syslog](https://tools.ietf.org/html/rfc5424), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Syslog server to view logs. - ->**Prerequisite:** You must have a Syslog server configured. - -If you are using rsyslog, please make sure your rsyslog authentication mode is `x509/name`. - -## Syslog Server Configuration - -1. In the **Endpoint** field, enter the IP address and port for your Syslog server. Additionally, in the dropdown, select the protocol that your Syslog server uses. - -1. In the **Program** field, enter the name of the application sending logs to your Syslog server, e.g. `Rancher`. - -1. If you are using a cloud logging service, e.g. [Sumologic](https://www.sumologic.com/), enter a **Token** that authenticates with your Syslog server. You will need to create this token in the cloud logging service. - -1. Select a **Log Severity** for events that are logged to the Syslog server. For more information on each severity level, see the [Syslog protocol documentation](https://tools.ietf.org/html/rfc5424#page-11). - - - By specifying a **Log Severity** does not mean that will act as a filtering mechanism for logs. To do that you should use a parser on the Syslog server. 
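Before saving the configuration, you may want to confirm that the Syslog server is reachable and accepting messages. One way to do this is with the `logger` utility from util-linux; the server, port, and protocol below are placeholders, and the available flags differ between `logger` implementations:

```
logger --server syslog.example.com --port 514 --tcp --tag Rancher "rancher syslog connectivity test"
```

If the test message does not appear on the Syslog server, check the endpoint, protocol, and any firewall rules before configuring Rancher.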
- -## Encryption Configuration - -If your Syslog server is using **TCP** protocol and uses TLS, you need to select **Use TLS** and complete the **Encryption Configuration** form. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. diff --git a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/_index.md b/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/_index.md deleted file mode 100644 index 74c738cf5..000000000 --- a/content/rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Project Logging in Rancher v2.0.x-v2.4.x -shortTitle: Project Logging -weight: 2527 -aliases: - - /rancher/v2.x/en/project-admin/tools/logging - - /rancher/v2.x/en/logging/legacy/project-logging ---- - -Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. - -For background information about how logging integrations work, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/#how-logging-integrations-work) - -Rancher supports the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - ->**Note:** You can only configure one logging service per cluster or per project. - -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. - -## Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -## Advantages - -Setting up a logging service to collect logs from your cluster/project has several advantages: - -- Logs errors and warnings in your Kubernetes infrastructure to a stream. The stream informs you of events like a container crashing, a pod eviction, or a node dying. -- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. -- Helps you when troubleshooting or debugging. -- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. 
- -## Logging Scope - -You can configure logging at either cluster level or project level. - -- [Cluster logging]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. - -- Project logging writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - - Kubernetes system components logs stored at `/var/lib/rancher/rke/logs/`. - -## Enabling Project Logging - -1. From the **Global** view, navigate to the project that you want to configure project logging. - -1. Select **Tools > Logging** in the navigation bar. In versions before v2.2.0, you can choose **Resources > Logging**. - -1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports the following services: - - - [Elasticsearch]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/fluentd/) - -1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. - - - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. - - - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) - - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) - - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) - - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) - - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) - - - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. - 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. 
Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. - -## Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.x/en/logging/v2.5/_index.md b/content/rancher/v2.x/en/logging/v2.5/_index.md deleted file mode 100644 index 851d3ba6c..000000000 --- a/content/rancher/v2.x/en/logging/v2.5/_index.md +++ /dev/null @@ -1,374 +0,0 @@ ---- -title: Logging in Rancher v2.5 -shortTitle: Rancher v2.5 -weight: 1 ---- - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) -- [Enabling Logging for Rancher Managed Clusters](#enabling-logging-for-rancher-managed-clusters) -- [Uninstall Logging](#uninstall-logging) -- [Role-based Access Control](#role-based-access-control) -- [Configuring the Logging Application](#configuring-the-logging-application) -- [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) -- [Working with Taints and Tolerations](#working-with-taints-and-tolerations) - -# Changes in Rancher v2.5 - -The following changes were introduced to logging in Rancher v2.5: - -- The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. -- [Fluent Bit](https://fluentbit.io/) is now used to aggregate the logs, and [Fluentd](https://www.fluentd.org/) is used for filtering the messages and routing them to the outputs. Previously, only Fluentd was used. -- Logging can be configured with a Kubernetes manifest, because logging now uses a Kubernetes operator with Custom Resource Definitions. -- We now support filtering logs. -- We now support writing logs to multiple outputs. -- We now always collect Control Plane and etcd logs. - -The following figure from the [Banzai documentation](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) shows the new logging architecture: - -
How the Banzai Cloud Logging Operator Works with Fluentd and Fluent Bit
- -![How the Banzai Cloud Logging Operator Works with Fluentd]({{}}/img/rancher/banzai-cloud-logging-operator.png) - -# Enabling Logging for Rancher Managed Clusters - -You can enable the logging for a Rancher managed cluster by going to the Apps page and installing the logging app. - -1. In the Rancher UI, go to the cluster where you want to install logging and click **Cluster Explorer**. -1. Click **Apps**. -1. Click the `rancher-logging` app. -1. Scroll to the bottom of the Helm chart README and click **Install**. - -**Result:** The logging app is deployed in the `cattle-logging-system` namespace. - -# Uninstall Logging - -1. From the **Cluster Explorer**, click **Apps & Marketplace**. -1. Click **Installed Apps**. -1. Go to the `cattle-logging-system` namespace and check the boxes for `rancher-logging` and `rancher-logging-crd`. -1. Click **Delete**. -1. Confirm **Delete**. - -**Result** `rancher-logging` is uninstalled. - -# Role-based Access Control - -Rancher logging has two roles, `logging-admin` and `logging-view`. - -- `logging-admin` gives users full access to namespaced flows and outputs -- `logging-view` allows users to *view* namespaced flows and outputs, and cluster flows and outputs - -> **Why choose one role over the other?** Edit access to cluster flow and cluster output resources is powerful. Any user with it has edit access for all logs in the cluster. - -In Rancher, the cluster administrator role is the only role with full access to all `rancher-logging` resources. Cluster members are not able to edit or read any logging resources. Project owners and members have the following privileges: - -Project Owners | Project Members ---- | --- -able to create namespaced flows and outputs in their projects' namespaces | only able to view the flows and outputs in projects' namespaces -can collect logs from anything in their projects' namespaces | cannot collect any logs in their projects' namespaces - -Both project owners and project members require at least *one* namespace in their project to use logging. If they do not, then they may not see the logging button in the top nav dropdown. - -# Configuring the Logging Application - -To configure the logging application, go to the **Cluster Explorer** in the Rancher UI. In the upper left corner, click **Cluster Explorer > Logging**. - -### Overview of Logging Custom Resources - -The following Custom Resource Definitions are used to configure logging: - -- [Flow and ClusterFlow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/#flows-clusterflows) -- [Output and ClusterOutput](https://banzaicloud.com/docs/one-eye/logging-operator/crds/#outputs-clusteroutputs) - -According to the [Banzai Cloud documentation,](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) - -> You can define `outputs` (destinations where you want to send your log messages, for example, Elasticsearch, or and Amazon S3 bucket), and `flows` that use filters and selectors to route log messages to the appropriate outputs. You can also define cluster-wide outputs and flows, for example, to use a centralized output that namespaced users cannot modify. - -# Examples - -Once logging is installed, you can use these examples to help craft your own logging pipeline. - -### Cluster Output to ElasticSearch - -Let's say you wanted to send all logs in your cluster to an `elasticsearch` cluster. First, we create a cluster output. 
- -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterOutput -metadata: - name: "example-es" - namespace: "cattle-logging-system" -spec: - elasticsearch: - host: elasticsearch.example.com - port: 9200 - scheme: http -``` - -We have created this cluster output, without elasticsearch configuration, in the same namespace as our operator: `cattle-logging-system`. Any time we create a cluster flow or cluster output, we have to put it in the `cattle-logging-system` namespace. - -Now that we have configured where we want the logs to go, let's configure all logs to go to that output. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterFlow -metadata: - name: "all-logs" - namespace: "cattle-logging-system" -spec: - globalOutputRefs: - - "example-es" -``` - -We should now see our configured index with logs in it. - -### Output to Splunk - -What if we have an application team who only wants logs from a specific namespaces sent to a `splunk` server? For this case, we can use namespaced outputs and flows. - -Before we start, let's set up that team's application: `coolapp`. - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: devteam ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coolapp - namespace: devteam - labels: - app: coolapp -spec: - replicas: 2 - selector: - matchLabels: - app: coolapp - template: - metadata: - labels: - app: coolapp - spec: - containers: - - name: generator - image: paynejacob/loggenerator:latest -``` - -With `coolapp` running, we will follow a similar path as when we created a cluster output. However, unlike cluster outputs, we create our output in our application's namespace. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Output -metadata: - name: "devteam-splunk" - namespace: "devteam" -spec: - SplunkHec: - host: splunk.example.com - port: 8088 - protocol: http -``` - -Once again, let's feed our output some logs. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Flow -metadata: - name: "devteam-logs" - namespace: "devteam" -spec: - localOutputRefs: - - "devteam-splunk" -``` - -### Unsupported Output - -For the final example, we create an output to write logs to a destination that is not supported out of the box (e.g. 
syslog): - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: syslog-config - namespace: cattle-logging-system -type: Opaque -stringData: - fluent-bit.conf: | - [INPUT] - Name forward - Port 24224 - - [OUTPUT] - Name syslog - InstanceName syslog-output - Match * - Addr syslog.example.com - Port 514 - Cluster ranchers - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fluentbit-syslog-forwarder - namespace: cattle-logging-system - labels: - output: syslog -spec: - selector: - matchLabels: - output: syslog - template: - metadata: - labels: - output: syslog - spec: - containers: - - name: fluentbit - image: paynejacob/fluent-bit-out-syslog:latest - ports: - - containerPort: 24224 - volumeMounts: - - mountPath: "/fluent-bit/etc/" - name: configuration - volumes: - - name: configuration - secret: - secretName: syslog-config ---- -apiVersion: v1 -kind: Service -metadata: - name: syslog-forwarder - namespace: cattle-logging-system -spec: - selector: - output: syslog - ports: - - protocol: TCP - port: 24224 - targetPort: 24224 ---- -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterFlow -metadata: - name: all-logs - namespace: cattle-logging-system -spec: - globalOutputRefs: - - syslog ---- -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterOutput -metadata: - name: syslog - namespace: cattle-logging-system -spec: - forward: - servers: - - host: "syslog-forwarder.cattle-logging-system" - require_ack_response: false - ignore_network_errors_at_startup: false -``` - -Let's break down what is happening here. First, we create a deployment of a container that has the additional `syslog` plugin and accepts logs forwarded from another `fluentd`. Next we create an output configured as a forwarder to our deployment. The deployment `fluentd` will then forward all logs to the configured `syslog` destination. - -> **Note on syslog** Official `syslog` support is coming in Rancher v2.5.4. However, this example still provides an overview on using unsupported plugins. - -# Working with a Custom Docker Root Directory - -_Applies to v2.5.6+_ - -If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. -This will ensure that the Logging CRs created will use your specified path rather than the default Docker `data-root` location. - -# Working with Taints and Tolerations - -"Tainting" a Kubernetes node causes pods to repel running on that node. -Unless the pods have a `toleration` for that node's taint, they will run on other nodes in the cluster. -[Taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) can work in conjunction with the `nodeSelector` [field](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) within the `PodSpec`, which enables the *opposite* effect of a taint. -Using `nodeSelector` gives pods an affinity towards certain nodes. -Both provide choice for the what node(s) the pod will run on. - -### Default Implementation in Rancher's Logging Stack - -By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. -The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. -Moreover, we can populate the `nodeSelector` to ensure that our pods *only* run on Linux nodes. -Let's look at an example pod YAML file with these settings... - -```yaml -apiVersion: v1 -kind: Pod -# metadata... -spec: - # containers... 
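  # The tolerations below match the cattle.io/os=linux taint that Rancher applies
  # to Linux nodes, and the nodeSelector pins the pod to Linux nodes only.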
- tolerations: - - key: cattle.io/os - operator: "Equal" - value: "linux" - effect: NoSchedule - nodeSelector: - kubernetes.io/os: linux -``` - -In the above example, we ensure that our pod only runs on Linux nodes, and we add a `toleration` for the taint we have on all of our Linux nodes. -You can do the same with Rancher's existing taints, or with your own custom ones. - -### Windows Support - -Clusters with Windows workers support exporting logs from Linux nodes, but Windows node logs are currently unable to be exported. -Only Linux node logs are able to be exported. - -### Adding NodeSelector Settings and Tolerations for Custom Taints - -If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. - -```yaml -tolerations: - # insert tolerations... -nodeSelector: - # insert nodeSelector... -``` - -These values will add both settings to the `fluentd`, `fluentbit`, and `logging-operator` containers. -Essentially, these are global settings for all pods in the logging stack. - -However, if you would like to add tolerations for *only* the `fluentbit` container, you can add the following to the chart's values. - -```yaml -fluentbit_tolerations: - # insert tolerations list for fluentbit containers only... -``` - -# Troubleshooting - -### The `cattle-logging` Namespace Being Recreated - -If your cluster previously deployed logging from the Cluster Manager UI, you may encounter an issue where its `cattle-logging` namespace is continually being recreated. - -The solution is to delete all `clusterloggings.management.cattle.io` and `projectloggings.management.cattle.io` custom resources from the cluster specific namespace in the management cluster. -The existence of these custom resources causes Rancher to create the `cattle-logging` namespace in the downstream cluster if it does not exist. - -The cluster namespace matches the cluster ID, so we need to find the cluster ID for each cluster. - -1. In your web browser, navigate to your cluster(s) in either the Cluster Manager UI or the Cluster Explorer UI. -2. Copy the `` portion from one of the URLs below. The `` portion is the cluster namespace name. - -```bash -# Cluster Management UI -https:///c// - -# Cluster Explorer UI (Dashboard) -https:///dashboard/c// -``` - -Now that we have the `` namespace, we can delete the CRs that cause `cattle-logging` to be continually recreated. -*Warning:* ensure that logging, the version installed from the Cluster Manager UI, is not currently in use. - -```bash -kubectl delete clusterloggings.management.cattle.io -n -kubectl delete projectloggings.management.cattle.io -n -``` diff --git a/content/rancher/v2.x/en/logging/v2.5/migrating/_index.md b/content/rancher/v2.x/en/logging/v2.5/migrating/_index.md deleted file mode 100644 index c568a5818..000000000 --- a/content/rancher/v2.x/en/logging/v2.5/migrating/_index.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: Migrating to Rancher v2.5 Logging -weight: 5 -aliases: - - /rancher/v2.x/en/logging/migrating ---- -Starting in v2.5, the logging feature available within Rancher has been completely overhauled. The [logging operator](https://github.com/banzaicloud/logging-operator) from Banzai Cloud has been adopted; Rancher configures this tooling for use when deploying logging. - -Among the many features and changes in the new logging functionality is the removal of project-specific logging configurations. 
Instead, one now configures logging at the namespace level. Cluster-level logging remains available, but configuration options differ. - -> Note: The pre-v2.5 user interface is now referred to as the _Cluster Manager_. The v2.5+ dashboard is referred to as the _Cluster Explorer_. - -## Installation - -To install logging in Rancher v2.5+, refer to [installation instructions]({{}}/rancher/v2.x/en/logging/v2.5/#enabling-logging-for-rancher-managed-clusters). - -## Terminology & Familiarity - -In v2.5, logging configuration is centralized under a _Logging_ menu option available in the _Cluster Explorer_. It is from this menu option that logging for both cluster and namespace is configured. - -> Note: Logging is installed on a per-cluster basis. You will need to navigate between clusters to configure logging for each cluster. - -There are four key concepts to understand for v2.5+ logging: - -1. Outputs - - _Outputs_ are a configuration resource that determine a destination for collected logs. This is where settings for aggregators such as ElasticSearch, Kafka, etc. are stored. _Outputs_ are namespaced resources. - -2. Flows - - _Flows_ are a configuration resource that determine collection, filtering, and destination rules for logs. It is within a flow that one will configure what logs to collect, how to mutate or filter them, and which outputs to send the logs to. _Flows_ are namespaced resources, and can connect either to an _Output_ in the same namespace, or a _ClusterOutput_. - -3. ClusterOutputs - - _ClusterOutputs_ serve the same functionality as _Outputs_, except they are a cluster-scoped resource. _ClusterOutputs_ are necessary when collecting logs cluster-wide, or if you wish to provide an output to all namespaces in your cluster. - -4. ClusterFlows - - _ClusterFlows_ serve the same function as _Flows_, but at the cluster level. They are used to configure log collection for an entire cluster, instead of on a per-namespace level. _ClusterFlows_ are also where mutations and filters are defined, same as _Flows_ (in functionality). - -# Cluster Logging - -To configure cluster-wide logging for v2.5+ logging, one needs to setup a _ClusterFlow_. This object defines the source of logs, any transformations or filters to be applied, and finally the output(s) for the logs. - -> Important: _ClusterFlows_ must be defined within the `cattle-logging-system` namespace. _ClusterFlows_ will not work if defined in any other namespace. - -In legacy logging, in order to collect logs from across the entire cluster, one only needed to enable cluster-level logging and define the desired output. This basic approach remains in v2.5+ logging. To replicate legacy cluster-level logging, follow these steps: - -1. Define a _ClusterOutput_ according to the instructions found under [Output Configuration](#output-configuration) -2. Create a _ClusterFlow_, ensuring that it is set to be created in the `cattle-logging-system` namespace - 1. Remove all _Include_ and _Exclude_ rules from the flow definition. This ensures that all logs are gathered. - 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation - 3. Define your cluster output(s) - -This will result in logs from all sources in the cluster (all pods, and all system components) being collected and sent to the output(s) you defined in the _ClusterFlow_. - -# Project Logging - -Logging in v2.5+ is not project-aware. 
This means that in order to collect logs from pods running in project namespaces, you will need to define _Flows_ for those namespaces. - -To collect logs from a specific namespace, follow these steps: - -1. Define an _Output_ or _ClusterOutput_ according to the instructions found under [Output Configuration](#output-configuration) -2. Create a _Flow_, ensuring that it is set to be created in the namespace in which you want to gather logs. - 1. If you wish to define _Include_ or _Exclude_ rules, you may do so. Otherwise, removal of all rules will result in all pods in the target namespace having their logs collected. - 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation - 3. Define your output(s) - these can be either _ClusterOutput_ or _Output_ objects. - -This will result in logs from all sources in the namespace (pods) being collected and sent to the output(s) you defined in your _Flow_. - -> To collect logs from a project, repeat the above steps for every namespace within the project. Alternatively, you can label your project workloads with a common label (e.g. `project=my-project`) and use a _ClusterFlow_ to collect logs from all pods matching this label. - -# Output Configuration -In legacy logging, there are five logging destinations to choose from: Elasticsearch, Splunk, Kafka, Fluentd, and Syslog. With the exception of Syslog, all of these destinations are available in logging v2.5+. - - -## Elasticsearch - -| Legacy Logging | v2.5+ Logging | Notes | -|-----------------------------------------------|-----------------------------------|-----------------------------------------------------------| -| Endpoint | Target -> Host | Make sure to specify Scheme (https/http), as well as Port | -| X-Pack Security -> Username | Access -> User | | -| X-Pack Security -> Password | Access -> Password | Password must now be stored in a secret | -| SSL Configuration -> Client Private Key | SSL -> Client Key | Key must now be stored in a secret | -| SSL Configuration -> Client Certificate | SSL -> Client Cert | Certificate must now be stored in a secret | -| SSL Configuration -> Client Key Password | SSL -> Client Key Pass | Password must now be stored in a secret | -| SSL Configuration -> Enabled SSL Verification | SSL -> Certificate Authority File | Certificate must now be stored in a secret | - - -In legacy logging, indices were automatically created according to the format in the "Index Patterns" section. In v2.5 logging, default behavior has been changed to logging to a single index. You can still configure index pattern functionality on the output object by editing as YAML and inputting the following values: - -``` -... -spec: - elasticsearch: - ... - logstash_format: true - logstash_prefix: - logstash_dateformat: "%Y-%m-%d" -``` - -Replace `` with the prefix for the indices that will be created. In legacy logging, this defaulted to the name of the cluster. 
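Putting those fields together, a complete cluster-wide Elasticsearch output with an index pattern can look like the following sketch. The host, scheme, and prefix are placeholders, and the surrounding structure follows the _ClusterOutput_ examples in the v2.5 logging documentation:

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: example-es
  namespace: cattle-logging-system
spec:
  elasticsearch:
    host: elasticsearch.example.com   # placeholder host
    port: 9200
    scheme: http
    logstash_format: true
    logstash_prefix: my-cluster       # placeholder prefix, e.g. your cluster name
    logstash_dateformat: "%Y-%m-%d"
```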
- -## Splunk - -| Legacy Logging | v2.5+ Logging | Notes | -|------------------------------------------|----------------------------------------|----------------------------------------------------------------------------------------| -| HEC Configuration -> Endpoint | Target -> Host | Protocol (https/http) and port must be defined separately from the host | -| HEC Configuration -> Token | Access -> Token | Token must now be stored as a secret | -| HEC Configuration -> Index | Edit as YAML -> `index` | `index` field must be added as YAML key under `spec.splunkHec` | -| HEC Configuration -> Source | Edit as YAML -> `source` | `source` field must be added as YAML key under `spec.splunkHec` | -| SSL Configuration -> Client Private Key | Edit as YAML -> `client_key` | `client_key` field must be added as YAML key under `spec.splunkHec`. See (1) | -| SSL Configuration -> Client Certificate | Edit as YAML -> `client_cert` | `client_cert` field must be added as YAML key under `spec.splunkHec`. See (1) | -| SSL Configuration -> Client Key Password | _Not Supported_ | Specifying a password for the client private key is not currently supported. | -| SSL Configuration -> SSL Verify | Edit as YAML -> `ca_file` or `ca_path` | `ca_file` or `ca_path` field must be added as YAML key under `spec.splunkHec`. See (2) | - -_(1) `client_key` and `client_cert` values must be paths to the key and cert files, respectively. These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -_(2) Users can configure either `ca_file` (a path to a PEM-encoded CA certificate) or `ca_path` (a path to a directory containing CA certificates in PEM format). These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -## Kafka - -| Legacy Logging | v2.5+ Logging | Notes | -|-----------------------------------------|----------------------------|------------------------------------------------------| -| Kafka Configuration -> Endpoint Type | - | Zookeeper is no longer supported as an endpoint type | -| Kafka Configuration -> Endpoint | Target -> Brokers | Comma-separated list of brokers (host:port) | -| Kafka Configuration -> Topic | Target -> Default Topic | | -| SSL Configuration -> Client Private Key | SSL -> SSL Client Cert | Certificate must be stored as a secret | -| SSL Configuration -> Client Certificate | SSL -> SSL Client Cert Key | Key must be stored as a secret | -| SSL Configuration -> CA Certificate PEM | SSL -> SSL CA Cert | Certificate must be stored as a secret | -| SASL Configuration -> Username | Access -> Username | Username must be stored in a secret | -| SASL Configuration -> Password | Access -> Password | Password must be stored in a secret | -| SASL Configuration -> Scram Mechanism | Access -> Scram Mechanism | Input mechanism as string, e.g. "sha256" or "sha512" | - -## Fluentd - -As of v2.5.2, it is only possible to add a single Fluentd server using the "Edit as Form" option. To add multiple servers, edit the output as YAML and input multiple servers. 
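As a sketch of what that YAML can look like, the output below defines two forward servers; the hostnames, ports, and weights are placeholders, and the field placement follows the mapping table below:

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: fluentd-servers
  namespace: cattle-logging-system
spec:
  forward:
    servers:
      - host: fluentd-a.example.com   # placeholder hosts
        port: 24224
        weight: 60
      - host: fluentd-b.example.com
        port: 24224
        weight: 60
```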
- -| Legacy Logging | v2.5+ Logging | Notes | -|------------------------------------------|-----------------------------------------------------|----------------------------------------------------------------------| -| Fluentd Configuration -> Endpoint | Target -> Host, Port | Input the host and port separately | -| Fluentd Configuration -> Shared Key | Access -> Shared Key | Shared key must be stored as a secret | -| Fluentd Configuration -> Username | Access -> Username | Username must be stored as a secret | -| Fluentd Configuration -> Password | Access -> Password | Password must be stored as a secret | -| Fluentd Configuration -> Hostname | Edit as YAML -> `host` | `host` field set as YAML key under `spec.forward.servers[n]` | -| Fluentd Configuration -> Weight | Edit as YAML -> `weight` | `weight` field set as YAML key under `spec.forward.servers[n]` | -| SSL Configuration -> Use TLS | - | Do not need to explicitly enable. Define client cert fields instead. | -| SSL Configuration -> Client Private Key | Edit as YAML -> `tls_private_key_path` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> Client Certificate | Edit as YAML -> `tls_client_cert_path` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> Client Key Password | Edit as YAML -> `tls_client_private_key_passphrase` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> SSL Verify | Edit as YAML -> `tls_insecure_mode` | Field set as YAML key under `spec.forward`. Default: `false` | -| SSL Configuration -> CA Certificate PEM | Edit as YAML -> `tls_cert_path` | Field set as YAML key under `spec.forward`. See (1) | -| Enable Gzip Compression | - | No longer supported in v2.5+ logging | - -_(1) These values are to be specified as paths to files. Those files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -## Syslog - -As of v2.5.2, syslog is not currently supported as an output using v2.5+ logging. - -## Custom Log Fields - -In order to add custom log fields, you will need to add the following YAML to your flow configuration: - -``` -... -spec: - filters: - - record_modifier: - records: - - foo: "bar" -``` - -(replace `foo: "bar"` with custom log fields you wish to add) - -# System Logging - -In legacy logging, collecting logs from system components was accomplished by checking a box labeled "Include System Log" when setting up cluster logging. In v2.5+ logging, system logs are gathered in one of two ways: - -1. Gather all cluster logs, not specifying any match or exclusion rules. This results in all container logs from the cluster being collected, which includes system logs. -2. Specifically target system logs by adding match rules for system components. Specific match rules depend on the component being collected. \ No newline at end of file diff --git a/content/rancher/v2.x/en/longhorn/_index.md b/content/rancher/v2.x/en/longhorn/_index.md deleted file mode 100644 index ed5a42b13..000000000 --- a/content/rancher/v2.x/en/longhorn/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Longhorn - Cloud native distributed block storage for Kubernetes -shortTitle: Longhorn Storage -weight: 19 ---- - -[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. - -Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. 
It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) - -With Longhorn, you can: - -- Use Longhorn volumes as persistent storage for the distributed stateful applications in your Kubernetes cluster -- Partition your block storage into Longhorn volumes so that you can use Kubernetes volumes with or without a cloud provider -- Replicate block storage across multiple nodes and data centers to increase availability -- Store backup data in external storage such as NFS or AWS S3 -- Create cross-cluster disaster recovery volumes so that data from a primary Kubernetes cluster can be quickly recovered from backup in a second Kubernetes cluster -- Schedule recurring snapshots of a volume, and schedule recurring backups to NFS or S3-compatible secondary storage -- Restore volumes from backup -- Upgrade Longhorn without disrupting persistent volumes - -
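If you prefer not to use the Rancher UI, the same chart can be installed with Helm directly, as described in the Longhorn documentation. A minimal sketch, assuming the upstream chart repository and the default `longhorn-system` namespace, looks like this:

```
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
```

Refer to the Longhorn documentation linked below for version-specific installation options.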
Longhorn Dashboard
-![Longhorn Dashboard]({{}}/img/rancher/longhorn-screenshot.png) - -### New in Rancher v2.5 - -Before Rancher v2.5, Longhorn could be installed as a Rancher catalog app. In Rancher v2.5, the catalog system was replaced by the **Apps & Marketplace,** and it became possible to install Longhorn as an app from that page. - -The **Cluster Explorer** now allows you to manipulate Longhorn's Kubernetes resources from the Rancher UI. So now you can control the Longhorn functionality with the Longhorn UI, or with kubectl, or by manipulating Longhorn's Kubernetes custom resources in the Rancher UI. - -These instructions assume you are using Rancher v2.5, but Longhorn can be installed with earlier Rancher versions. For documentation about installing Longhorn as a catalog app using the legacy Rancher UI, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/deploy/install/install-with-rancher/) - -### Installing Longhorn with Rancher - -1. Go to the **Cluster Explorer** in the Rancher UI. -1. Click **Apps.** -1. Click `longhorn`. -1. Optional: To customize the initial settings, click **Longhorn Default Settings** and edit the configuration. For help customizing the settings, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/references/settings/) -1. Click **Install.** - -**Result:** Longhorn is deployed in the Kubernetes cluster. - -### Accessing Longhorn from the Rancher UI - -1. From the **Cluster Explorer," go to the top left dropdown menu and click **Cluster Explorer > Longhorn.** -1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview**section. - -**Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. - -### Uninstalling Longhorn from the Rancher UI - -1. Click **Cluster Explorer > Apps & Marketplace.** -1. Click **Installed Apps.** -1. Go to the `longhorn-system` namespace and check the boxes next to the `longhorn` and `longhorn-crd` apps. -1. Click **Delete,** and confirm **Delete.** - -**Result:** Longhorn is uninstalled. - -### GitHub Repository - -The Longhorn project is available [here.](https://github.com/longhorn/longhorn) - -### Documentation - -The Longhorn documentation is [here.](https://longhorn.io/docs/) - -### Architecture - -Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. - -The storage controller and replicas are themselves orchestrated using Kubernetes. - -You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) - -
Longhorn Architecture
-![Longhorn Architecture]({{}}/img/rancher/longhorn-architecture.svg) \ No newline at end of file diff --git a/content/rancher/v2.x/en/monitoring-alerting/_index.md b/content/rancher/v2.x/en/monitoring-alerting/_index.md deleted file mode 100644 index 987477db3..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Monitoring and Alerting -shortTitle: Monitoring/Alerting -description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring -weight: 14 -aliases: - - /rancher/v2.x/en/dashboard/monitoring-alerting - - /rancher/v2.x/en/dashboard/notifiers - - /rancher/v2.x/en/cluster-admin/tools/monitoring/ ---- - -If you are using Rancher v2.5, refer to [this section.](./v2.5) - -If you are using Rancher v2.0-v2.4, refer to [this section.](./v2.0.x-v2.4.x) \ No newline at end of file diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/_index.md deleted file mode 100644 index 8a5fc1777..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Monitoring in Rancher v2.0-v2.4 -shortTitle: Rancher v2.0-v2.4 -weight: 2 ---- - -This section contains documentation related to the monitoring features available in Rancher before v2.5. - - - -- [Cluster Monitoring](./cluster-monitoring) - - [Project Monitoring](./cluster-monitoring/project-monitoring) -- [Cluster Alerts](./cluster-alerts) - - [Project Alerts](./cluster-alerts/project-alerts) -- [Notifiers](./notifiers) diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/_index.md deleted file mode 100644 index e89f6b859..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/_index.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: Cluster Alerts -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/alerts - - /rancher/v2.x/en/monitoring-alerting/legacy/alerts/cluster-alerts ---- - -> In Rancher 2.5, the monitoring application was improved. There are now two ways to enable monitoring and alerting. The older way is documented in this section, and the new application for monitoring and alerting is documented [here.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/) - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. 
- -This section covers the following topics: - -- [About Alerts](#about-alerts) - - [Alert Event Examples](#alert-event-examples) - - [Alerts Triggered by Prometheus Queries](#alerts-triggered-by-prometheus-queries) - - [Urgency Levels](#urgency-levels) - - [Scope of Alerts](#scope-of-alerts) - - [Managing Cluster Alerts](#managing-cluster-alerts) -- [Adding Cluster Alerts](#adding-cluster-alerts) -- [Cluster Alert Configuration](#cluster-alert-configuration) - - [System Service Alerts](#system-service-alerts) - - [Resource Event Alerts](#resource-event-alerts) - - [Node Alerts](#node-alerts) - - [Node Selector Alerts](#node-selector-alerts) - - [CIS Scan Alerts](#cis-scan-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - -# About Alerts - -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, you must configure one or more notifier in Rancher. - -When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. - -For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) - -### Alert Event Examples - -Some examples of alert events are: - -- A Kubernetes [master component]({{}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. -- A node or [workload]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. -- A scheduled deployment taking place as planned. -- A node's hardware resources becoming overstressed. - -### Alerts Triggered by Prometheus Queries - -When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. For examples of expressions, refer to [this page.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/) - -Monitoring must be [enabled]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/#enabling-cluster-monitoring) before you can trigger alerts with custom Prometheus queries or expressions. - -### Urgency Levels - -You can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. - -### Scope of Alerts - -The scope for alerts can be set at either the cluster level or [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). - -At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: - -- The state of your nodes. -- The system services that manage your Kubernetes cluster. -- The resource events from specific system services. 
-- Prometheus expressions that cross their configured thresholds - -### Managing Cluster Alerts - -After you set up cluster alerts, you can manage each alert object. To manage alerts, browse to the cluster containing the alerts that you want to manage, and then select **Tools > Alerts**. You can: - -- Deactivate/Reactivate alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts - -# Adding Cluster Alerts - -As a [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. - ->**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/notifiers/#adding-notifiers). - -1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. -1. Enter a **Name** for the alert group that describes its purpose. You can use a group to collect alert rules that serve a common purpose. -1. Based on the type of alert you want to create, refer to the [cluster alert configuration section.](#cluster-alert-configuration) -1. Continue adding more **Alert Rules** to the group. -1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) to send the alerts to. - - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. -1. Click **Create.** - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - - -# Cluster Alert Configuration - - - [System Service Alerts](#system-service-alerts) - - [Resource Event Alerts](#resource-event-alerts) - - [Node Alerts](#node-alerts) - - [Node Selector Alerts](#node-selector-alerts) - - [CIS Scan Alerts](#cis-scan-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - -# System Service Alerts - -This alert type monitors for events that affect one of the Kubernetes master components, regardless of the node they occur on. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **System Services** option, and then select an option from the dropdown: - -- [controller-manager](https://kubernetes.io/docs/concepts/overview/components/#kube-controller-manager) -- [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) -- [scheduler](https://kubernetes.io/docs/concepts/overview/components/#kube-scheduler) - -### Is - -The alert will be triggered when the selected Kubernetes master component is unhealthy. - -### Send a - -Select the urgency level of the alert. The options are: - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - - Select the urgency level based on the importance of the service and how many nodes fill the role within your cluster. For example, if you're making an alert for the `etcd` service, select **Critical**. If you're making an alert for redundant schedulers, **Warning** is more appropriate. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds.
-- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Resource Event Alerts - -This alert type monitors for specific events that are thrown from a resource type. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Choose the type of resource event that triggers an alert. The options are: - -- **Normal**: triggers an alert when any standard resource event occurs. -- **Warning**: triggers an alert when unexpected resource events occur. - -Select a resource type from the **Choose a Resource** drop-down that you want to trigger an alert. - -- [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) -- [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) -- [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert by considering factors such as how often the event occurs or its importance. For example: - -- If you set a normal alert for pods, you're likely to receive alerts often, and individual pods usually self-heal, so select an urgency of **Info**. -- If you set a warning alert for StatefulSets, it's very likely to impact operations, so select an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Node Alerts - -This alert type monitors for events that occur on a specific node. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Node** option, and then make a selection from the **Choose a Node** drop-down. - -### Is - -Choose an event to trigger the alert. - -- **Not Ready**: Sends you an alert when the node is unresponsive. -- **CPU usage over**: Sends you an alert when the node raises above an entered percentage of its processing allocation. -- **Mem usage over**: Sends you an alert when the node raises above an entered percentage of its memory allocation. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. 
- -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Node Selector Alerts - -This alert type monitors for events that occur on any node on marked with a label. For more information, see the Kubernetes documentation for [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Node Selector** option, and then click **Add Selector** to enter a key value pair for a label. This label should be applied to one or more of your nodes. Add as many selectors as you'd like. - -### Is - -Choose an event to trigger the alert. - -- **Not Ready**: Sends you an alert when selected nodes are unresponsive. -- **CPU usage over**: Sends you an alert when selected nodes raise above an entered percentage of processing allocation. -- **Mem usage over**: Sends you an alert when selected nodes raise above an entered percentage of memory allocation. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# CIS Scan Alerts -_Available as of v2.4.0_ - -This alert type is triggered based on the results of a CIS scan. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select **CIS Scan.** - -### Is - -Choose an event to trigger the alert: - -- Completed Scan -- Has Failure - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. 
-- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Metric Expression Alerts - -This alert type is triggered when the value of a Prometheus expression crosses a threshold. It is available after you enable monitoring. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Input or select an **Expression**. The dropdown shows the original metrics from Prometheus, including: - -- [**Node**](https://github.com/prometheus/node_exporter) -- [**Container**](https://github.com/google/cadvisor) -- [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) -- [**Kubernetes Components**](https://github.com/kubernetes/metrics) -- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) -- [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{}}/rancher/v2.x//en/cluster-admin/tools/logging)) -- [**Cluster Level Grafana**](http://docs.grafana.org/administration/metrics/) -- **Cluster Level Prometheus** - -### Is - -Choose a comparison: - -- **Equal**: Trigger the alert when the expression value is equal to the threshold. -- **Not Equal**: Trigger the alert when the expression value is not equal to the threshold. -- **Greater Than**: Trigger the alert when the expression value is greater than the threshold. -- **Less Than**: Trigger the alert when the expression value is less than the threshold. -- **Greater or Equal**: Trigger the alert when the expression value is greater than or equal to the threshold. -- **Less or Equal**: Trigger the alert when the expression value is less than or equal to the threshold. - -If applicable, choose a comparison value or a threshold for the alert to be triggered. - -### For - -Select a duration. The alert is triggered when the expression value stays across the threshold for longer than the configured duration. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's load expression ```sum(node_load5) / count(node_cpu_seconds_total{mode="system"})``` rises above 0.6 warrants an urgency of **Info**, while a value above 1 warrants an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour.
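Because notifiers and alerts are built on top of the Prometheus Alertmanager (see [About Alerts](#about-alerts)), these three options correspond to Alertmanager's route grouping parameters. The snippet below is only an illustrative sketch of how the defaults map onto an Alertmanager route; the receiver name is a placeholder, and Rancher generates and manages this configuration for you, so you normally never edit it by hand.

```yaml
route:
  receiver: my-notifier   # placeholder: whichever notifier you configured in Rancher
  group_wait: 30s         # "Group Wait Time"
  group_interval: 30s     # "Group Interval Time"
  repeat_interval: 1h     # "Repeat Wait Time"
```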
diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/_index.md deleted file mode 100644 index aaeff9e5a..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/_index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Default Alerts for Cluster Monitoring -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts - - /rancher/v2.x/en/monitoring-alerting/legacy/alerts/cluster-alerts/default-alerts ---- - -When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. - -Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). - -# Alerts for etcd -Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. - -A leader is the node that handles all client requests that need cluster consensus. For more information, you can refer to this [explanation of how etcd works.](https://rancher.com/blog/2019/2019-01-29-what-is-etcd/#how-does-etcd-work) - -The leader of the cluster can change in response to certain events. It is normal for the leader to change, but too many changes can indicate a problem with the network or a high CPU load. With longer latencies, the default etcd configuration may cause frequent heartbeat timeouts, which trigger a new leader election. - -| Alert | Explanation | -|-------|-------------| -| A high number of leader changes within the etcd cluster are happening | A warning alert is triggered when the leader changes more than three times in one hour. | -| Database usage close to the quota 500M | A warning alert is triggered when the size of etcd exceeds 500M.| -| Etcd is unavailable | A critical alert is triggered when etcd becomes unavailable. | -| Etcd member has no leader | A critical alert is triggered when the etcd cluster does not have a leader for at least three minutes. | - - -# Alerts for Kubernetes Components -Rancher provides alerts when core Kubernetes system components become unhealthy. - -Controllers update Kubernetes resources based on changes in etcd. The [controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. - -The [scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is a core component of Kubernetes. It is responsible for scheduling cluster workloads to nodes, based on various configurations, metrics, resource requirements and workload-specific requirements. 
- -| Alert | Explanation | -|-------|-------------| -| Controller Manager is unavailable | A critical warning is triggered when the cluster’s controller-manager becomes unavailable. | -| Scheduler is unavailable | A critical warning is triggered when the cluster’s scheduler becomes unavailable. | - - -# Alerts for Events -Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. In the Rancher UI, from the project view, you can see events for each workload. - -| Alert | Explanation | -|-------|-------------| -| Get warning deployment event | A warning alert is triggered when a warning event happens on a deployment. | - - -# Alerts for Nodes -Alerts can be triggered based on node metrics. Each computing resource in a Kubernetes cluster is called a node. [Nodes]({{}}/rancher/v2.x/en/cluster-admin/#kubernetes-cluster-node-components) can be either bare-metal servers or virtual machines. - -| Alert | Explanation | -|-------|-------------| -| High CPU load | A warning alert is triggered if the node uses more than 100 percent of the node’s available CPU seconds for at least three minutes. | -| High node memory utilization | A warning alert is triggered if the node uses more than 80 percent of its available memory for at least three minutes. | -| Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | - -# Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/#default-project-level-alerts) diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/_index.md deleted file mode 100644 index 7d57bd668..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/_index.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: Project Alerts -weight: 2526 -aliases: - - /rancher/v2.x/en/project-admin/tools/alerts - - /rancher/v2.x/en/monitoring-alerting/legacy/alerts/project-alerts ---- - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. - -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, one or more [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) must be configured at the cluster level. 
- -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. - -This section covers the following topics: - -- [Alerts scope](#alerts-scope) -- [Default project-level alerts](#default-project-level-alerts) -- [Adding project alerts](#adding-project-alerts) -- [Managing project alerts](#managing-project-alerts) -- [Project Alert Rule Configuration](#project-alert-rule-configuration) - - [Pod Alerts](#pod-alerts) - - [Workload Alerts](#workload-alerts) - - [Workload Selector Alerts](#workload-selector-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - - -# Alerts Scope - -The scope for alerts can be set at either the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. - -At the project level, Rancher monitors specific deployments and sends alerts for: - -* Deployment availability -* Workload status -* Pod status -* Prometheus expressions that cross their configured thresholds - -# Default Project-level Alerts - -When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them is configured at the cluster level. - -| Alert | Explanation | -|-------|-------------| -| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | -| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | - -For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) - -# Adding Project Alerts - ->**Prerequisite:** Before you can receive project alerts, you must add a notifier. - -1. From the **Global** view, navigate to the project that you want to configure project alerts for. Select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**. - -1. Click **Add Alert Group**. - -1. Enter a **Name** for the alert group that describes its purpose. You can use a group to collect alert rules that serve a common purpose. - -1. Based on the type of alert you want to create, fill out the form. For help, refer to the [configuration](#project-alert-rule-configuration) section below. - -1. Continue adding more alert rules to the group. - -1. Finally, choose the [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. - - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. - -1. Click **Create.** - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - - -# Managing Project Alerts - -To manage project alerts, browse to the project containing the alerts that you want to manage. Then select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**.
You can: - -- Deactivate/Reactivate alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts - - -# Project Alert Rule Configuration - -- [Pod Alerts](#pod-alerts) -- [Workload Alerts](#workload-alerts) -- [Workload Selector Alerts](#workload-selector-alerts) -- [Metric Expression Alerts](#metric-expression-alerts) - -# Pod Alerts - -This alert type monitors for the status of a specific pod. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Pod** option, and then select a pod from the drop-down. - -### Is - -Select a pod status that triggers an alert: - -- **Not Running** -- **Not Scheduled** -- **Restarted times within the last Minutes** - -### Send a - -Select the urgency level of the alert. The options are: - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on pod state. For example, select **Info** for a Job pod that stops running after the job finishes. However, if an important pod isn't scheduled, it may affect operations, so choose **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Workload Alerts - -This alert type monitors for the availability of a workload. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Choose the **Workload** option. Then choose a workload from the drop-down. - -### Is - -Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Workload Selector Alerts - -This alert type monitors for the availability of all workloads marked with tags that you've specified. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Workload Selector** option, and then click **Add Selector** to enter the key-value pair for a label.
This label should be applied to one or more of your workloads. If one of the workloads drops below your specifications, an alert is triggered. - -### Is - -Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. - -# Metric Expression Alerts -_Available as of v2.2.4_ - -If you enable [project monitoring]({{}}/rancher/v2.x/en/project-admin/tools/#monitoring), this alert type is triggered when the value of a Prometheus expression crosses a threshold. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Input or select an **Expression**. The dropdown shows the original metrics from Prometheus, including: - -- [**Container**](https://github.com/google/cadvisor) -- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) -- [**Customize**]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#project-metrics) -- [**Project Level Grafana**](http://docs.grafana.org/administration/metrics/) -- **Project Level Prometheus** - -### Is - -Choose a comparison. - -- **Equal**: Trigger the alert when the expression value is equal to the threshold. -- **Not Equal**: Trigger the alert when the expression value is not equal to the threshold. -- **Greater Than**: Trigger the alert when the expression value is greater than the threshold. -- **Less Than**: Trigger the alert when the expression value is less than the threshold. -- **Greater or Equal**: Trigger the alert when the expression value is greater than or equal to the threshold. -- **Less or Equal**: Trigger the alert when the expression value is less than or equal to the threshold. - -If applicable, choose a comparison value or a threshold for the alert to be triggered. - -### For - -Select a duration. The alert is triggered when the expression value stays across the threshold for longer than the configured duration. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when an expression for container memory usage relative to its limit rises above 60% warrants an urgency of **Info**, while a value above 95% warrants an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds.
-- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. \ No newline at end of file diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/_index.md deleted file mode 100644 index 564282860..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Integrating Rancher and Prometheus for Cluster Monitoring -shortTitle: Cluster Monitoring -description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring -weight: 1 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/cluster-monitoring ---- - -_Available as of v2.2.0_ - -> In Rancher 2.5, the monitoring application was improved. There are now two ways to enable monitoring and alerting. The older way is documented in this section, and the new application for monitoring and alerting is documented in the [dashboard section.]({{}}/rancher/v2.x/en/dashboard/monitoring-alerting) - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -This section covers the following topics: - -- [About Prometheus](#about-prometheus) -- [Monitoring scope](#monitoring-scope) -- [Enabling cluster monitoring](#enabling-cluster-monitoring) -- [Resource consumption](#resource-consumption) - - [Resource consumption of Prometheus pods](#resource-consumption-of-prometheus-pods) - - [Resource consumption of other pods](#resource-consumption-of-other-pods) - -# About Prometheus - -Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): - ->A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. - -You can configure monitoring to collect metrics at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/). - -In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. - -By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc.
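In practice, these time series are retrieved with PromQL queries. The query below is only an illustrative sketch; it assumes the node exporter's standard `node_cpu_seconds_total` series, which cluster monitoring scrapes, and computes each node's approximate CPU utilization over the last five minutes:

```
1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))
```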
- -Multi-tenancy, in the form of cluster-only and project-only Prometheus instances, is also supported. - -# Monitoring Scope - -Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - -- Cluster monitoring allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - - - [Kubernetes control plane]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#kubernetes-components-metrics) - - [etcd database]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#etcd-metrics) - - [All nodes (including workers)]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#cluster-metrics) - -- [Project monitoring]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. - -# Enabling Cluster Monitoring - -As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. - -> **Prerequisites:** The following TCP ports need to be opened for metrics scraping: -> -> | Port | Node type | Component | -> | --- | --- | --- | -> | 9796 | Worker | Node exporter | -> | 10254 | Worker | Nginx Ingress Controller | -> | 10250 | Worker/Controlplane | Kubelet | -> | 10251 | Controlplane | Kube scheduler | -> | 10252 | Controlplane | Kube controller manager | -> | 2379 | Etcd | Etcd server | - -> Monitoring V1 requires a Kubernetes version less than or equal to v1.20.x. To install monitoring on Kubernetes v1.21+, you will need to [migrate to Monitoring V2.]({{}}/rancher/v2.5/en/monitoring-alerting/migrating/) - -1. From the **Global** view, navigate to the cluster for which you want to configure cluster monitoring. - -1. Select **Tools > Monitoring** in the navigation bar. - -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter your desired configuration options. - -1. Click **Save**. - -**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/) through the Rancher dashboard or directly from Grafana. - -> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard.
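As a quick sanity check from the command line (assuming `kubectl` access to the cluster and the default `cattle-prometheus` namespace that cluster monitoring uses, referenced in the custom metrics section below), you can confirm that the monitoring workloads have come up:

```bash
# Prometheus runs as this StatefulSet when cluster monitoring is enabled
kubectl -n cattle-prometheus get statefulset prometheus-cluster-monitoring

# Grafana, the exporters, and the other monitoring pods should all reach Running
kubectl -n cattle-prometheus get pods
```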
- -# Resource Consumption - -When enabling cluster monitoring, you need to ensure your worker nodes and Prometheus pod have enough resources. The tables below provides a guide of how much resource consumption will be used. In larger deployments, it is strongly advised that the monitoring infrastructure be placed on dedicated nodes in the cluster. - -### Resource Consumption of Prometheus Pods - -This table is the resource consumption of the Prometheus pod, which is based on the number of all the nodes in the cluster. The count of nodes includes the worker, control plane and etcd nodes. Total disk space allocation should be approximated by the `rate * retention` period set at the cluster level. When enabling cluster level monitoring, you should adjust the CPU and Memory limits and reservation. - -Number of Cluster Nodes | CPU (milli CPU) | Memory | Disk -------------------------|-----|--------|------ -5 | 500 | 650 MB | ~1 GB/Day -50| 2000 | 2 GB | ~5 GB/Day -256| 4000 | 6 GB | ~18 GB/Day - -Additional pod resource requirements for cluster level monitoring. - -| Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable | -|---------------------|---------------------------------|---------------|---------------|-------------|-------------|--------------| -| Prometheus | prometheus | 750m | 750Mi | 1000m | 1000Mi | Y | -| | prometheus-proxy | 50m | 50Mi | 100m | 100Mi | Y | -| | prometheus-auth | 100m | 100Mi | 500m | 200Mi | Y | -| | prometheus-config-reloader | - | - | 50m | 50Mi | N | -| | rules-configmap-reloader | - | - | 100m | 25Mi | N | -| Grafana | grafana-init-plugin-json-copy | 50m | 50Mi | 50m | 50Mi | Y | -| | grafana-init-plugin-json-modify | 50m | 50Mi | 50m | 50Mi | Y | -| | grafana | 100m | 100Mi | 200m | 200Mi | Y | -| | grafana-proxy | 50m | 50Mi | 100m | 100Mi | Y | -| Kube-State Exporter | kube-state | 100m | 130Mi | 100m | 200Mi | Y | -| Node Exporter | exporter-node | 200m | 200Mi | 200m | 200Mi | Y | -| Operator | prometheus-operator | 100m | 50Mi | 200m | 100Mi | Y | - - -### Resource Consumption of Other Pods - -Besides the Prometheus pod, there are components that are deployed that require additional resources on the worker nodes. - -Pod | CPU (milli CPU) | Memory (MB) -----|-----------------|------------ -Node Exporter (Per Node) | 100 | 30 -Kube State Cluster Monitor | 100 | 130 -Grafana | 100 | 150 -Prometheus Cluster Monitoring Nginx | 50 | 50 diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/_index.md deleted file mode 100644 index 8d28fbd28..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Cluster Metrics -weight: 3 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/cluster-metrics - - /rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics ---- - -_Available as of v2.2.0_ - -Cluster metrics display the hardware utilization for all nodes in your cluster, regardless of its role. They give you a global monitoring insight into the cluster. - -Some of the biggest metrics to look out for: - -- **CPU Utilization** - - High load either indicates that your cluster is running efficiently or that you're running out of CPU resources. 
- -- **Disk Utilization** - - Be on the lookout for increased read and write rates on nodes nearing their disk capacity. This advice is especially true for etcd nodes, as running out of storage on an etcd node leads to cluster failure. - -- **Memory Utilization** - - Deltas in memory utilization usually indicate a memory leak. - -- **Load Average** - - Generally, you want your load average to match your number of logical CPUs for the cluster. For example, if your cluster has 8 logical CPUs, the ideal load average would be 8 as well. If you load average is well under the number of logical CPUs for the cluster, you may want to reduce cluster resources. On the other hand, if your average is over 8, your cluster may need more resources. - -## Finding Node Metrics - -1. From the **Global** view, navigate to the cluster that you want to view metrics. - -1. Select **Nodes** in the navigation bar. - -1. Select a specific node and click on its name. - -1. Click on **Node Metrics**. - -[_Get expressions for Cluster Metrics_]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#cluster-metrics) - -### Etcd Metrics - ->**Note:** Only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! - -You should also pay attention to the text at the top of the etcd metrics, which displays leader election statistics. This text indicates if etcd currently has a leader, which is the etcd instance that coordinates the other etcd instances in your cluster. A large increase in leader changes implies etcd is unstable. If you notice a change in leader election statistics, you should investigate them for issues. - -Some of the biggest metrics to look out for: - -- **Etcd has a leader** - - etcd is usually deployed on multiple nodes and elects a leader to coordinate its operations. If etcd does not have a leader, its operations are not being coordinated. - -- **Number of leader changes** - - If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. - -[_Get expressions for Etcd Metrics_]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#etcd-metrics) - -### Kubernetes Components Metrics - -Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. - ->**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. 
After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. - -Some of the more important component metrics to monitor are: - -- **API Server Request Latency** - - Increasing API response times indicate there's a generalized problem that requires investigation. - -- **API Server Request Rate** - - Rising API request rates usually coincide with increased API response times. Increased request rates also indicate a generalized problem requiring investigation. - -- **Scheduler Preemption Attempts** - - If you see a spike in scheduler preemptions, it's an indication that you're running out of hardware resources, as Kubernetes is recognizing it doesn't have enough resources to run all your pods and is prioritizing the more important ones. - -- **Scheduling Failed Pods** - - Failed pods can have a variety of causes, such as unbound persistent volume claims, exhausted hardware resources, non-responsive nodes, etc. - -- **Ingress Controller Request Process Time** - - How fast ingress is routing connections to your cluster services. - -[_Get expressions for Kubernetes Component Metrics_]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/#kubernetes-components-metrics) - -## Rancher Logging Metrics - -Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/). - -[_Get expressions for Rancher Logging Metrics_]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#rancher-logging-metrics) - -## Finding Workload Metrics - -Workload metrics display the hardware utilization for a Kubernetes workload. You can also view metrics for [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [stateful sets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) and so on. - -1. From the **Global** view, navigate to the project that you want to view workload metrics. - -1. From the main navigation bar, choose **Resources > Workloads.** In versions before v2.3.0, choose **Workloads** on the main navigation bar. - -1. Select a specific workload and click on its name. - -1. In the **Pods** section, select a specific pod and click on its name. - - - **View the Pod Metrics:** Click on **Pod Metrics**. - - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. 
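The same workload data can also be queried directly with a Prometheus expression. The query below is an illustrative sketch only; it uses the cAdvisor `container_memory_working_set_bytes` series that cluster monitoring scrapes, and the namespace and pod name pattern are placeholders to replace with your own workload:

```
sum(container_memory_working_set_bytes{namespace="my-namespace", pod_name=~"my-workload-.*", container_name!="POD"}) by (pod_name)
```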
- -[_Get expressions for Workload Metrics_]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/_index.md deleted file mode 100644 index 0a1fe82a5..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/_index.md +++ /dev/null @@ -1,491 +0,0 @@ ---- -title: Prometheus Custom Metrics Adapter -weight: 5 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/custom-metrics - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/custom-metrics - - /rancher/v2.x/en/cluster-admin/tools/monitoring/custom-metrics/ ---- - -After you've enabled [cluster level monitoring]({{< baseurl >}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring), you can view the metrics data from Rancher. You can also deploy the Prometheus custom metrics adapter so that you can use the HPA with metrics stored in cluster monitoring. - -## Deploy Prometheus Custom Metrics Adapter - -We are going to use the [Prometheus custom metrics adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/releases/tag/v0.5.0), version v0.5.0. It is a good example of a [custom metrics server](https://github.com/kubernetes-incubator/custom-metrics-apiserver). You must be the *cluster owner* to execute the following steps. - -- Get the service account that cluster monitoring is using. It should be configured in the workload ID: `statefulset:cattle-prometheus:prometheus-cluster-monitoring`. If you didn't customize anything, the service account name should be `cluster-monitoring`. - -- Grant permissions to that service account. You will need two kinds of permissions. -One role is `extension-apiserver-authentication-reader` in `kube-system`, so you will need to create a `RoleBinding` in `kube-system`. This permission is used to read the API aggregation configuration from a config map in `kube-system`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: custom-metrics-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -The other one is the cluster role `system:auth-delegator`, so you will need to create a `ClusterRoleBinding`. This permission allows the adapter to create subject access reviews. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: custom-metrics:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -- Create the configuration for the custom metrics adapter. The following is an example configuration. The configuration details are explained in the next section.
- -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: adapter-config - namespace: cattle-prometheus -data: - config.yaml: | - rules: - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: [] - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_seconds_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_seconds_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)$ - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_total$ - resources: - template: <<.Resource>> - name: - matches: "" - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_seconds_total - resources: - template: <<.Resource>> - name: - matches: ^(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: [] - resources: - template: <<.Resource>> - name: - matches: ^(.*)_seconds_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - resourceRules: - cpu: - containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - memory: - containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) - nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - window: 1m -``` - -- Create HTTPS TLS certs for your api server. You can use following command to create a self-signed cert. - -```bash -openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out serving.crt -keyout serving.key -subj "/C=CN/CN=custom-metrics-apiserver.cattle-prometheus.svc.cluster.local" -# And you will find serving.crt and serving.key in your path. And then you are going to create a secret in cattle-prometheus namespace. -kubectl create secret generic -n cattle-prometheus cm-adapter-serving-certs --from-file=serving.key=./serving.key --from-file=serving.crt=./serving.crt -``` - -- Then you can create the prometheus custom metrics adapter. And you will need a service for this deployment too. 
Creating it via Import YAML or Rancher would do. Please create those resources in `cattle-prometheus` namespaces. - -Here is the prometheus custom metrics adapter deployment. -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: custom-metrics-apiserver - template: - metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - spec: - serviceAccountName: cluster-monitoring - containers: - - name: custom-metrics-apiserver - image: directxman12/k8s-prometheus-adapter-amd64:v0.5.0 - args: - - --secure-port=6443 - - --tls-cert-file=/var/run/serving-cert/serving.crt - - --tls-private-key-file=/var/run/serving-cert/serving.key - - --logtostderr=true - - --prometheus-url=http://prometheus-operated/ - - --metrics-relist-interval=1m - - --v=10 - - --config=/etc/adapter/config.yaml - ports: - - containerPort: 6443 - volumeMounts: - - mountPath: /var/run/serving-cert - name: volume-serving-cert - readOnly: true - - mountPath: /etc/adapter/ - name: config - readOnly: true - - mountPath: /tmp - name: tmp-vol - volumes: - - name: volume-serving-cert - secret: - secretName: cm-adapter-serving-certs - - name: config - configMap: - name: adapter-config - - name: tmp-vol - emptyDir: {} - -``` - -Here is the service of the deployment. -```yaml -apiVersion: v1 -kind: Service -metadata: - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - ports: - - port: 443 - targetPort: 6443 - selector: - app: custom-metrics-apiserver -``` - -- Create API service for your custom metric server. - -```yaml -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.custom.metrics.k8s.io -spec: - service: - name: custom-metrics-apiserver - namespace: cattle-prometheus - group: custom.metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 - -``` - -- Then you can verify your custom metrics server by `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1`. If you see the return datas from the api, it means that the metrics server has been successfully set up. - -- You create HPA with custom metrics now. Here is an example of HPA. You will need to create a nginx deployment in your namespace first. - -```yaml -kind: HorizontalPodAutoscaler -apiVersion: autoscaling/v2beta1 -metadata: - name: nginx -spec: - scaleTargetRef: - # point the HPA at the nginx deployment you just created - apiVersion: apps/v1 - kind: Deployment - name: nginx - # autoscale between 1 and 10 replicas - minReplicas: 1 - maxReplicas: 10 - metrics: - # use a "Pods" metric, which takes the average of the - # given metric across all pods controlled by the autoscaling target - - type: Pods - pods: - metricName: memory_usage_bytes - targetAverageValue: 5000000 -``` - -And then, you should see your nginx is scaling up. HPA with custom metrics works. - -## Configuration of prometheus custom metrics adapter - -> Refer to https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md - -The adapter determines which metrics to expose, and how to expose them, -through a set of "discovery" rules. Each rule is executed independently -(so make sure that your rules are mutually exclusive), and specifies each -of the steps the adapter needs to take to expose a metric in the API. 
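Once the adapter is running with a set of rules, you can check what those rules actually expose by querying the custom metrics API directly. This is a minimal sketch rather than part of the original procedure: the `default` namespace and the `memory_usage_bytes` metric are placeholders (the metric matches the HPA example above), and `jq` is used only for readability.

```bash
# List every metric name the adapter currently exposes through the custom metrics API.
kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 | jq -r '.resources[].name'

# Fetch one pod-scoped metric. Replace "default" and "memory_usage_bytes" with a
# namespace and metric name returned by the call above.
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/memory_usage_bytes" | jq .
```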
- -Each rule can be broken down into roughly four parts: - -- *Discovery*, which specifies how the adapter should find all Prometheus - metrics for this rule. - -- *Association*, which specifies how the adapter should determine which - Kubernetes resources a particular metric is associated with. - -- *Naming*, which specifies how the adapter should expose the metric in - the custom metrics API. - -- *Querying*, which specifies how a request for a particular metric on one - or more Kubernetes objects should be turned into a query to Prometheus. - -A basic config with one rule might look like: - -```yaml -rules: -# this rule matches cumulative cAdvisor metrics measured in seconds -- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - resources: - # skip specifying generic resource<->label mappings, and just - # attach only pod and namespace resources by mapping label names to group-resources - overrides: - namespace: {resource: "namespace"} - pod_name: {resource: "pod"} - # specify that the `container_` and `_seconds_total` suffixes should be removed. - # this also introduces an implicit filter on metric family names - name: - # we use the value of the capture group implicitly as the API name - # we could also explicitly write `as: "$1"` - matches: "^container_(.*)_seconds_total$" - # specify how to construct a query to fetch samples for a given series - # This is a Go template where the `.Series` and `.LabelMatchers` string values - # are available, and the delimiters are `<<` and `>>` to avoid conflicts with - # the prometheus query language - metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!=\"POD\"}[2m])) by (<<.GroupBy>>)" -``` - -### Discovery - -Discovery governs the process of finding the metrics that you want to -expose in the custom metrics API. There are two fields that factor into -discovery: `seriesQuery` and `seriesFilters`. - -`seriesQuery` specifies a Prometheus series query (as passed to the -`/api/v1/series` endpoint in Prometheus) to use to find some set of -Prometheus series. The adapter will strip the label values from this -series, and then use the resulting metric-name-label-names combinations -later on. - -In many cases, `seriesQuery` will be sufficient to narrow down the list of -Prometheus series. However, sometimes (especially if two rules might -otherwise overlap), it's useful to do additional filtering on metric -names. In this case, `seriesFilters` can be used. After the list of -series is returned from `seriesQuery`, each series has its metric name -filtered through any specified filters. - -Filters may be either: - -- `is: <regex>`, which matches any series whose name matches the specified - regex. - -- `isNot: <regex>`, which matches any series whose name does not match the - specified regex. - -For example: - -```yaml -# match all cAdvisor metrics that aren't measured in seconds -seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}' -seriesFilters: - isNot: "^container_.*_seconds_total" -``` - -### Association - -Association governs the process of figuring out which Kubernetes resources -a particular metric could be attached to. The `resources` field controls -this process. - -There are two ways to associate resources with a particular metric. In -both cases, the value of the label becomes the name of the particular -object. - -One way is to specify that any label name that matches some particular -pattern refers to some group-resource based on the label name.
This can -be done using the `template` field. The pattern is specified as a Go -template, with the `Group` and `Resource` fields representing group and -resource. You don't necessarily have to use the `Group` field (in which -case the group is guessed by the system). For instance: - -```yaml -# any label `kube_<group>_<resource>` becomes `<group>.<resource>` in Kubernetes -resources: - template: "kube_<<.Group>>_<<.Resource>>" -``` - -The other way is to specify that some particular label represents some -particular Kubernetes resource. This can be done using the `overrides` -field. Each override maps a Prometheus label to a Kubernetes -group-resource. For instance: - -```yaml -# the microservice label corresponds to the apps.deployment resource -resources: - overrides: - microservice: {group: "apps", resource: "deployment"} -``` - -These two can be combined, so you can specify both a template and some -individual overrides. - -The resources mentioned can be any resource available in your Kubernetes -cluster, as long as you've got a corresponding label. - -### Naming - -Naming governs the process of converting a Prometheus metric name into -a metric in the custom metrics API, and vice versa. It's controlled by -the `name` field. - -Naming is controlled by specifying a pattern to extract an API name from -a Prometheus name, and potentially a transformation on that extracted -value. - -The pattern is specified in the `matches` field, and is just a regular -expression. If not specified, it defaults to `.*`. - -The transformation is specified by the `as` field. You can use any -capture groups defined in the `matches` field. If the `matches` field -doesn't contain capture groups, the `as` field defaults to `$0`. If it -contains a single capture group, the `as` field defaults to `$1`. -Otherwise, it's an error not to specify the `as` field. - -For example: - -```yaml -# turn any name <name>_total into <name>_per_second -# e.g. http_requests_total becomes http_requests_per_second -name: - matches: "^(.*)_total$" - as: "${1}_per_second" -``` - -### Querying - -Querying governs the process of actually fetching values for a particular -metric. It's controlled by the `metricsQuery` field. - -The `metricsQuery` field is a Go template that gets turned into -a Prometheus query, using input from a particular call to the custom -metrics API. A given call to the custom metrics API is distilled down to -a metric name, a group-resource, and one or more objects of that -group-resource. These get turned into the following fields in the -template: - -- `Series`: the metric name -- `LabelMatchers`: a comma-separated list of label matchers matching the - given objects. Currently, this is the label for the particular - group-resource, plus the label for namespace, if the group-resource is - namespaced. -- `GroupBy`: a comma-separated list of labels to group by. Currently, - this contains the group-resource label used in `LabelMatchers`. - -For instance, suppose we had a series `http_requests_total` (exposed as -`http_requests_per_second` in the API) with labels `service`, `pod`, -`ingress`, `namespace`, and `verb`. The first four correspond to -Kubernetes resources.
Then, if someone requested the metric -`pods/http_requests_per_second` for the pods `pod1` and `pod2` in the -`somens` namespace, we'd have: - -- `Series: "http_requests_total"` -- `LabelMatchers: "pod=~\"pod1|pod2\",namespace=\"somens\""` -- `GroupBy`: `pod` - -Additionally, there are two advanced fields that are "raw" forms of other -fields: - -- `LabelValuesByName`: a map of the labels and values from the - `LabelMatchers` field. The values are pre-joined by `|` - (for use with the `=~` matcher in Prometheus). -- `GroupBySlice`: the slice form of `GroupBy`. - -In general, you'll probably want to use the `Series`, `LabelMatchers`, and -`GroupBy` fields. The other two are for advanced usage. - -The query is expected to return one value for each object requested. The -adapter will use the labels on the returned series to associate a given -series back to its corresponding object. - -For example: - -```yaml -# convert cumulative cAdvisor metrics into rates calculated over 2 minutes -metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!=\"POD\"}[2m])) by (<<.GroupBy>>)" -``` diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/_index.md deleted file mode 100644 index daa7524e6..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/_index.md +++ /dev/null @@ -1,434 +0,0 @@ ---- -title: Prometheus Expressions -weight: 4 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/expression - - /rancher/v2.x/en/cluster-admin/tools/monitoring/expression - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression ---- - -The PromQL expressions in this doc can be used to configure [alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) - -> Before expressions can be used in alerts, monitoring must be enabled.
For more information, refer to the documentation on enabling monitoring [at the cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring) - -For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) - - - -- [Cluster Metrics](#cluster-metrics) - - [Cluster CPU Utilization](#cluster-cpu-utilization) - - [Cluster Load Average](#cluster-load-average) - - [Cluster Memory Utilization](#cluster-memory-utilization) - - [Cluster Disk Utilization](#cluster-disk-utilization) - - [Cluster Disk I/O](#cluster-disk-i-o) - - [Cluster Network Packets](#cluster-network-packets) - - [Cluster Network I/O](#cluster-network-i-o) -- [Node Metrics](#node-metrics) - - [Node CPU Utilization](#node-cpu-utilization) - - [Node Load Average](#node-load-average) - - [Node Memory Utilization](#node-memory-utilization) - - [Node Disk Utilization](#node-disk-utilization) - - [Node Disk I/O](#node-disk-i-o) - - [Node Network Packets](#node-network-packets) - - [Node Network I/O](#node-network-i-o) -- [Etcd Metrics](#etcd-metrics) - - [Etcd Has a Leader](#etcd-has-a-leader) - - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) - - [Number of Failed Proposals](#number-of-failed-proposals) - - [GRPC Client Traffic](#grpc-client-traffic) - - [Peer Traffic](#peer-traffic) - - [DB Size](#db-size) - - [Active Streams](#active-streams) - - [Raft Proposals](#raft-proposals) - - [RPC Rate](#rpc-rate) - - [Disk Operations](#disk-operations) - - [Disk Sync Duration](#disk-sync-duration) -- [Kubernetes Components Metrics](#kubernetes-components-metrics) - - [API Server Request Latency](#api-server-request-latency) - - [API Server Request Rate](#api-server-request-rate) - - [Scheduling Failed Pods](#scheduling-failed-pods) - - [Controller Manager Queue Depth](#controller-manager-queue-depth) - - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) - - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) - - [Ingress Controller Connections](#ingress-controller-connections) - - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) -- [Rancher Logging Metrics](#rancher-logging-metrics) - - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) - - [Fluentd Input Rate](#fluentd-input-rate) - - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) - - [Fluentd Output Rate](#fluentd-output-rate) -- [Workload Metrics](#workload-metrics) - - [Workload CPU Utilization](#workload-cpu-utilization) - - [Workload Memory Utilization](#workload-memory-utilization) - - [Workload Network Packets](#workload-network-packets) - - [Workload Network I/O](#workload-network-i-o) - - [Workload Disk I/O](#workload-disk-i-o) -- [Pod Metrics](#pod-metrics) - - [Pod CPU Utilization](#pod-cpu-utilization) - - [Pod Memory Utilization](#pod-memory-utilization) - - [Pod Network Packets](#pod-network-packets) - - [Pod Network I/O](#pod-network-i-o) - - [Pod Disk I/O](#pod-disk-i-o) -- [Container Metrics](#container-metrics) - - [Container CPU Utilization](#container-cpu-utilization) - - [Container Memory Utilization](#container-memory-utilization) - - [Container Disk I/O](#container-disk-i-o) - - - -# Cluster Metrics - -### Cluster CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - 
(avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | - -### Cluster Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
| -| Summary |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
| - -### Cluster Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | - -### Cluster Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | - -### Cluster Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total[5m])) by (instance)`
written`sum(rate(node_disk_written_bytes_total[5m])) by (instance)`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total[5m]))`
written`sum(rate(node_disk_written_bytes_total[5m]))`
| - -### Cluster Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
| -| Summary |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
| - -### Cluster Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
| -| Summary |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
| - -# Node Metrics - -### Node CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | - -### Node Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| -| Summary |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| - -### Node Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | - -### Node Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | - -### Node Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| - -### Node Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
| -| Summary |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
| - -### Node Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
| -| Summary |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
| - -# Etcd Metrics - -### Etcd Has a Leader - -`max(etcd_server_has_leader)` - -### Number of Times the Leader Changes - -`max(etcd_server_leader_changes_seen_total)` - -### Number of Failed Proposals - -`sum(etcd_server_proposals_failed_total)` - -### GRPC Client Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| - -### Peer Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| - -### DB Size - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | -| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | - -### Active Streams - -| Catalog | Expression | -| --- | --- | -| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| -| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| - -### Raft Proposals - -| Catalog | Expression | -| --- | --- | -| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| -| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| - -### RPC Rate - -| Catalog | Expression | -| --- | --- | -| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| -| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| - -### Disk Operations - -| Catalog | Expression | -| --- | --- | -| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| -| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| - -### Disk Sync Duration - -| Catalog | Expression | -| --- | --- | -| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| -| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| - -# Kubernetes Components Metrics - -### API Server Request Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | -| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | - -### API Server Request Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | -| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | - -### Scheduling Failed Pods - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | -| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | - -### Controller Manager Queue Depth - -| Catalog | Expression | -| --- | --- | -| Detail |
volumes`sum(volumes_depth) by instance`
deployment`sum(deployment_depth) by instance`
replicaset`sum(replicaset_depth) by instance`
service`sum(service_depth) by instance`
serviceaccount`sum(serviceaccount_depth) by instance`
endpoint`sum(endpoint_depth) by instance`
daemonset`sum(daemonset_depth) by instance`
statefulset`sum(statefulset_depth) by instance`
replicationmanager`sum(replicationmanager_depth) by instance`
| -| Summary |
volumes`sum(volumes_depth)`
deployment`sum(deployment_depth)`
replicaset`sum(replicaset_depth)`
service`sum(service_depth)`
serviceaccount`sum(serviceaccount_depth)`
endpoint`sum(endpoint_depth)`
daemonset`sum(daemonset_depth)`
statefulset`sum(statefulset_depth)`
replicationmanager`sum(replicationmanager_depth)`
| - -### Scheduler E2E Scheduling Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | -| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | - -### Scheduler Preemption Attempts - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | -| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | - -### Ingress Controller Connections - -| Catalog | Expression | -| --- | --- | -| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| -| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - -### Ingress Controller Request Process Time - -| Catalog | Expression | -| --- | --- | -| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | -| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | - -# Rancher Logging Metrics - - -### Fluentd Buffer Queue Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | - -### Fluentd Input Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | - -### Fluentd Output Errors Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | -| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | - -### Fluentd Output Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | - -# Workload Metrics - -### Workload CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | -| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | - -### Workload Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -# Pod Metrics - -### Pod CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| - -### Pod Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | -| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | - -### Pod Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -# Container Metrics - -### Container CPU Utilization - -| Catalog | Expression | -| --- | --- | -| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | - -### Container Memory Utilization - -`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` - -### Container Disk I/O - -| Catalog | Expression | -| --- | --- | -| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/_index.md deleted file mode 100644 index 2006e89a5..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Project Monitoring -weight: 2 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/project-monitoring ---- - -_Available as of v2.2.4_ - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -This section covers the following topics: - -- [Monitoring scope](#monitoring-scope) -- [Permissions to configure project monitoring](#permissions-to-configure-project-monitoring) -- [Enabling project monitoring](#enabling-project-monitoring) -- [Project-level monitoring resource requirements](#project-level-monitoring-resource-requirements) -- [Project metrics](#project-metrics) - -### Monitoring Scope - -Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - -- [Cluster monitoring]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. 
- - - [Kubernetes control plane]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#kubernetes-components-metrics) - - [etcd database]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#etcd-metrics) - - [All nodes (including workers)]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/#cluster-metrics) - -- Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. - -### Permissions to Configure Project Monitoring - -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. - -### Enabling Project Monitoring - -> **Prerequisite:** Cluster monitoring must be [enabled.]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) - -1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. - -1. Select **Tools > Monitoring** in the navigation bar. - -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Enter in your desired configuration options. - -1. Click **Save**. - -### Project-Level Monitoring Resource Requirements - -Container| CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|---------------|---------------|-------------|-------------|------------- -Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes -Grafana | 100m | 100Mi | 200m | 200Mi | No - - -**Result:** A single application,`project-monitoring`, is added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the project. After the application is `active`, you can start viewing project metrics through the [Rancher dashboard]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or directly from Grafana. - -> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. - -### Project Metrics -[Workload metrics]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and at the [project level.](#enabling-project-monitoring) - -You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. - -> **Example:** -> A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). After enabling project monitoring, you can edit the application to configure the Advanced Options -> Custom Metrics section. 
Enter the `Container Port` and `Path` and select the `Protocol`. - -To access a project-level Grafana instance, - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to a project that has monitoring enabled. - -1. From the project view, click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `project-monitoring` application. - -1. In the `project-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You will be logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/_index.md deleted file mode 100644 index d0ac0e39d..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Prometheus Configuration -weight: 1 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/prometheus - - /rancher/v2.x/en/cluster-admin/tools/monitoring/prometheus/ - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus ---- - -_Available as of v2.2.0_ - -While configuring monitoring at either the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), there are multiple options that can be configured. - -- [Basic Configuration](#basic-configuration) -- [Advanced Options](#advanced-options) -- [Node Exporter](#node-exporter) -- [Persistent Storage](#persistent-storage) -- [Remote Storage](#remote-storage) - -# Basic Configuration - -Option | Description --------|------------- -Data Retention | How long your Prometheus instance retains monitoring data scraped from Rancher objects before it's purged. -[Enable Node Exporter](#node-exporter) | Whether or not to deploy the node exporter. -Node Exporter Host Port | The host port on which data is exposed, i.e. data that Prometheus collects from your node hardware. Required if you have enabled the node exporter. -[Enable Persistent Storage](#persistent-storage) for Prometheus | Whether or not to configure storage for Prometheus so that metrics can be retained even if the Prometheus pod fails. -[Enable Persistent Storage](#persistent-storage) for Grafana | Whether or not to configure storage for Grafana so that the Grafana dashboards and configuration can be retained even if the Grafana pod fails. 
-Prometheus [CPU Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU resource limit for the Prometheus pod. -Prometheus [CPU Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU reservation for the Prometheus pod. -Prometheus [Memory Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource limit for the Prometheus pod. -Prometheus [Memory Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource requests for the Prometheus pod. -Selector | Ability to select the nodes in which Prometheus and Grafana pods are deployed to. To use this option, the nodes must have labels. - -# Advanced Options - -Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{}}/rancher/v2.x/en/catalog/), it can be configured like any other catalog application, by passing in values to Helm. - -> **Warning:** Any modification to the application without understanding the entire application can lead to catastrophic errors. - -### Prometheus RemoteRead and RemoteWrite - -_Available as of v2.4.0_ - -Prometheus RemoteRead and RemoteWrite can be configured as custom answers in the **Advanced Options** section. - -For more information on remote endpoints and storage, refer to the [Prometheus documentation.](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) - -The Prometheus operator documentation contains the full [RemoteReadSpec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) - -An example configuration would be: - -| Variable | Value | -|--------------|------------| -| `prometheus.remoteWrite[0].url` | `http://mytarget.com` | - -### LivenessProbe and ReadinessProbe - -_Available as of v2.4.0_ - -Prometheus LivenessProbe and ReadinessProbe can be configured as custom answers in the **Advanced Options** section. - -The Kubernetes probe spec is [here.](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#probe-v1-core) - -Some example key-value pairs are: - -| Variable | Value | -|--------------|------------| -| `prometheus.livenessProbe.timeoutSeconds` | 60 | -| `prometheus.readinessProbe.timeoutSeconds` | 60 | - -# Node Exporter - -The [node exporter](https://github.com/prometheus/node_exporter/blob/master/README.md) is a popular open source exporter, which exposes the metrics for hardware and \*NIX kernels OS. It is designed to monitor the host system. However, there are still issues with namespaces when running it in a container, mostly around filesystem mount spaces. In order to monitor actual network metrics for the container network, the node exporter must be deployed with the `hostNetwork` mode. - -When configuring Prometheus and enabling the node exporter, enter a host port in the **Node Exporter Host Port** that will not produce port conflicts with existing applications. The host port chosen must be open to allow internal traffic between Prometheus and the Node Exporter. 
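As a quick sanity check, you can confirm from another host that the node exporter answers on the chosen host port. This is only a sketch under assumed values: `9796` is the default host port mentioned in the warning below, and the node IP is a placeholder.

```bash
# Placeholder values: substitute a real node IP and the host port you configured.
NODE_IP=10.0.0.5
NODE_EXPORTER_PORT=9796

# A response containing Prometheus-format series such as node_cpu_seconds_total
# means the exporter is up and the port is reachable.
curl -s "http://${NODE_IP}:${NODE_EXPORTER_PORT}/metrics" | head -n 20
```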
- ->**Warning:** In order for Prometheus to collect the metrics of the node exporter, after enabling cluster monitoring, you must open the Node Exporter Host Port in the host firewall rules to allow intranet access. By default, `9796` is used as that host port. - -# Persistent Storage - ->**Prerequisite:** Configure one or more StorageClasses to use as [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. - -By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring external persistent storage for the cluster. With external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. - -When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the StorageClass. - -# Remote Storage - ->**Prerequisite:** A remote storage endpoint must be available. The list of possible integrations is available [here](https://prometheus.io/docs/operating/integrations/). - -Using advanced options, remote storage integration for the Prometheus installation can be configured as follows: - -``` -prometheus.remoteWrite[0].url = http://remote1/push -prometheus.remoteWrite[0].remoteTimeout = 33s - -prometheus.remoteWrite[1].url = http://remote2/push - - -prometheus.remoteRead[0].url = http://remote1/read -prometheus.remoteRead[0].proxyUrl = http://proxy.url -prometheus.remoteRead[0].bearerToken = token-value - -prometheus.remoteRead[1].url = http://remote2/read -prometheus.remoteRead[1].remoteTimeout = 33s -prometheus.remoteRead[1].readRecent = true -``` - -Additional fields can be set up based on the [ReadSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec). diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/_index.md deleted file mode 100644 index f5c02d08a..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Viewing Metrics -weight: 2 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/viewing-metrics - - /rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/viewing-metrics ---- - -_Available as of v2.2.0_ - -After you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), you will want to start viewing the data being collected. There are multiple ways to view this data. - -## Rancher Dashboard - ->**Note:** This is only available if you've enabled monitoring at the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring). Project-specific analytics must be viewed using the project's Grafana instance.
- -Rancher's dashboards are available at multiple locations: - -- **Cluster Dashboard**: From the **Global** view, navigate to the cluster. -- **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** -- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** -- **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** -- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** - -Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. - -Within each Prometheus metrics widget, there are several ways to customize your view. - -- Toggle between two views: - - **Detail**: Displays graphs and charts that let you view each event in a Prometheus time series - - **Summary** Displays events in a Prometheus time series that are outside the norm. -- Change the range of the time series that you're viewing to see a more refined or expansive data sample. -- Customize the data sample to display data between specific dates and times. - -When analyzing these metrics, don't be concerned about any single standalone metric in the charts and graphs. Rather, you should establish a baseline for your metrics over the course of time, e.g. the range of values that your components usually operate within and are considered normal. After you establish the baseline, be on the lookout for any large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. - -## Grafana - -If you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/#enabling-cluster-monitoring) or [project level]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/#enabling-project-monitoring), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. - -Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). - -### Authentication - -Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. - -When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. 
You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. - -### Accessing the Cluster-level Grafana Instance - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to the **System** project view. This project is where the cluster-level Grafana instance runs. - -1. Click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `cluster-monitoring` application. - -1. In the `cluster-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You are logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/_index.md deleted file mode 100644 index 3b64aedfa..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/_index.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Notifiers -weight: 4 -aliases: - - /rancher/v2.x/en/project-admin/tools/notifiers - - /rancher/v2.x/en/cluster-admin/tools/notifiers - - /rancher/v2.x/en/monitoring-alerting/legacy/notifiers ---- - -> In Rancher 2.5, the notifier application was improved. There are now two ways to enable notifiers. The older way is documented in this section, and the new application for notifiers is documented [here.]({{}}/rancher/v2.x/en/monitoring-alerting) - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. - -Rancher integrates with a variety of popular IT services, including: - -- **Slack**: Send alert notifications to your Slack channels. -- **Email**: Choose email recipients for alert notifications. -- **PagerDuty**: Route notifications to staff by phone, SMS, or personal email. -- **WebHooks**: Update a webpage with alert notifications. -- **WeChat**: (Available as of v2.2.0) Send alert notifications to your Enterprise WeChat contacts. -- **DingTalk**: (Available as of v2.4.6) Send alert notifications to DingTalk using a webhook. -- **Microsoft Teams**: (Available as of v2.4.6) Send alert notifications to Teams using a webhook. 
- -This section covers the following topics: - -- [Roles-based access control for notifiers](#roles-based-access-control-for-notifiers) -- [Adding notifiers](#adding-notifiers) -- [Configuration](#configuration) -- [Managing notifiers](#managing-notifiers) -- [Example payload for a webhook alert notifier](#example-payload-for-a-webhook-alert-notifier) - -# Roles-based Access Control for Notifiers - -Notifiers are configured at the cluster level. This model ensures that only cluster owners need to configure notifiers, leaving project owners to simply configure alerts in the scope of their projects. You don't need to dispense privileges like SMTP server access or cloud account access. - -# Adding Notifiers - -Set up a notifier so that you can begin configuring and sending alerts. - -1. From the **Global View**, open the cluster that you want to add a notifier. -1. From the main menu, select **Tools > Notifiers**. Then click **Add Notifier**. -1. Select the service you want to use as your notifier, and then fill out the form. For help filling out the form, refer to the configuration section below. -1. Click **Test.** You should receive a notification confirming that the notifier is configured correctly. -1. Click **Add** to complete adding the notifier. - -**Result:** Your notifier is added to Rancher. - -# Configuration - -- [Slack](#slack) -- [Email](#email) -- [PagerDuty](#pagerduty) -- [Webhook](#webhook) -- [WeChat](#wechat) -- [DingTalk](#dingtalk) -- [Microsoft Teams](#microsoft-teams) - -### Slack - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| URL | From Slack, create a webhook. For instructions, see the [Slack Documentation](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack). Then enter the Slack webhook URL. | -| Default Channel | Enter the name of the channel that you want to send alert notifications in the following format: `#`. Both public and private channels are supported. | -| Proxy URL | Proxy for the Slack webhook. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, the Slack channel you're configuring for the notifier outputs **Slack setting validated.** - -### Email - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Default Recipient Address | Enter the email address that you want to receive the notification. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -SMTP Server Configuration: - -| Field | Explanation | -|----------|----------------------| -| Sender | Enter an email address available on your mail server that you want to send the notification. | -| Host | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com` | -| Port | In the **Port** field, enter the port used for email. Typically, TLS uses `587` and SSL uses `465`. | -| Use TLS | If you're using TLS, make sure **Use TLS** is selected. | -| Username | Username to authenticate with the SMTP server. | -| Password | Password to authenticate with the SMTP server. | - -**Validation:** Click **Test**. If the test is successful, Rancher prints **settings validated** and you receive a test notification email. 
- -### PagerDuty - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Default Integration Key | From PagerDuty, create a Prometheus integration. For instructions, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. -| Service Key | The same as the integration key. For instructions on creating a Prometheus integration, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, your PagerDuty endpoint outputs **PagerDuty setting validated.** - -### Webhook - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| URL | Using the app of your choice, create a webhook URL. | -| Proxy URL | Proxy for the webhook. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, the URL you're configuring as a notifier outputs **Webhook setting validated.** - -### WeChat - -_Available as of v2.2.0_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Corporation ID | Enter the "EnterpriseID" of your corporation. You can get it fro the [Profile page](https://work.weixin.qq.com/wework_admin/frame#profile). | -| Application Agent ID | From Enterprise WeChat, create an application in the [Application page](https://work.weixin.qq.com/wework_admin/frame#apps), and then enter the "AgentId" of this application. You will also need to enter the application secret. | -| Application Secret | The secret that corresponds to the Application Agent ID. | -| Recipient Type | Party, tag, or user. | -| Default Recipient | The default recipient ID should correspond to the recipient type. It should be the party ID, tag ID or user account that you want to receive the notification. You could get contact information from [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). | -| Proxy URL | If you are using a proxy, enter the proxy URL. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, you should receive an alert message. - -### DingTalk - -_Available as of v2.4.6_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Webhook URL | Enter the DingTalk webhook URL. For help setting up the webhook, refer to the [DingTalk documentation.](https://www.alibabacloud.com/help/doc-detail/52872.htm) | -| Secret | Optional: Enter a secret for the DingTalk webhook. | -| Proxy URL | Optional: Enter a proxy for the DingTalk webhook. | -| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. 
[Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, the DingTalk notifier output is **DingTalk setting validated.** - -### Microsoft Teams - -_Available as of v2.4.6_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Webhook URL | Enter the Microsoft Teams webhook URL. For help setting up the webhook, refer to the [Teams Documentation.](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook) | -| Proxy URL | Optional: Enter a proxy for the Teams webhook. | -| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, the Teams notifier output is **MicrosoftTeams setting validated.** - -# Managing Notifiers - -After you set up notifiers, you can manage them. From the **Global** view, open the cluster that you want to manage your notifiers. Select **Tools > Notifiers**. You can: - -- **Edit** their settings that you configured during their initial setup. -- **Clone** them, to quickly setup slightly different notifiers. -- **Delete** them when they're no longer necessary. - -# Example Payload for a Webhook Alert Notifier - -```json -{ - "receiver": "c-2a3bc:kube-components-alert", - "status": "firing", - "alerts": [ - { - "status": "firing", - "labels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)", - "component_name": "scheduler", - "group_id": "c-2a3bc:kube-components-alert", - "logs": "Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service", - "severity": "critical" - }, - "annotations": {}, - "startsAt": "2020-01-30T19:18:13.321684733Z", - "endsAt": "0001-01-01T00:00:00Z", - "generatorURL": "" - } - ], - "groupLabels": { - "component_name": "scheduler", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service" - }, - "commonLabels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)" - } -} -``` -# What's Next? - -After creating a notifier, set up alerts to receive notifications of Rancher system events. - -- [Cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/). -- [Project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{}}/rancher/v2.x/en/project-admin/tools/alerts/). diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/_index.md deleted file mode 100644 index f6ff86eb1..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Monitoring in Rancher v2.5 -shortTitle: Rancher v2.5 -weight: 1 ---- - -Using Rancher, you can quickly deploy leading open-source monitoring alerting solutions onto your cluster. 
- -The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. - -Rancher's solution allows users to: - -- Monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments via Prometheus, a leading open-source monitoring solution. -- Define alerts based on metrics collected via Prometheus -- Create custom dashboards to make it easy to visualize collected metrics via Grafana -- Configure alert-based notifications via Email, Slack, PagerDuty, etc. using Prometheus Alertmanager -- Defines precomputed, frequently needed or computationally expensive expressions as new time series based on metrics collected via Prometheus (only available in 2.5) -- Expose collected metrics from Prometheus to the Kubernetes Custom Metrics API via Prometheus Adapter for use in HPA (only available in 2.5) - -More information about the resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). - -> If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no upgrade path for switching to the new monitoring/ alerting solution. You will need to disable monitoring/ alerting/notifiers in Cluster Manager before deploying the new monitoring solution via Cluster Explorer. - -For more information about upgrading the Monitoring app in Rancher 2.5, please refer to the [migration docs](./migrating). - -- [About Prometheus](#about-prometheus) -- [Enable Monitoring](#enable-monitoring) - - [Default Alerts, Targets, and Grafana Dashboards](#default-alerts-targets-and-grafana-dashboards) -- [Using Monitoring](#using-monitoring) - - [Grafana UI](#grafana-ui) - - [Prometheus UI](#prometheus-ui) - - [Viewing the Prometheus Targets](#viewing-the-prometheus-targets) - - [Viewing the PrometheusRules](#viewing-the-prometheusrules) - - [Viewing Active Alerts in Alertmanager](#viewing-active-alerts-in-alertmanager) -- [Uninstall Monitoring](#uninstall-monitoring) -- [Setting Resource Limits and Requests](#setting-resource-limits-and-requests) -- [Known Issues](#known-issues) - -# About Prometheus - -Prometheus provides a time series of your data, which is, according to the [Prometheus documentation:](https://prometheus.io/docs/concepts/data_model/) - -> A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. - -In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. 
Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. - -By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. - -# Enable Monitoring - -As an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. - -> **Requirements:** -> -> - Make sure that you are allowing traffic on port 9796 for each of your nodes because Prometheus will scrape metrics from here. -> - Make sure your cluster fulfills the resource requirements. The cluster should have at least 1950Mi memory available, 2700m CPU, and 50Gi storage. A breakdown of the resource limits and requests is [here.](#setting-resource-limits-and-requests) -> - When installing monitoring on an RKE cluster using RancherOS or Flatcar Linux nodes, change the etcd node certificate directory to `/opt/rke/etc/kubernetes/ssl`. - - -1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-monitoring` app. -1. Optional: Click **Chart Options** and configure alerting, Prometheus and Grafana. For help, refer to the [configuration reference.](./configuration) -1. Scroll to the bottom of the Helm chart README and click **Install.** - -**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. - -### Default Alerts, Targets, and Grafana Dashboards - -By default, Rancher Monitoring deploys exporters (such as [node-exporter](https://github.com/prometheus/node_exporter) and [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)) as well as default Prometheus alerts and Grafana dashboards (curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project) onto a cluster. - -To see the default alerts, go to the [Alertmanager UI](#viewing-active-alerts-in-alertmanager) and click **Expand all groups.** - -To see what services you are monitoring, you will need to see your targets. To view the default targets, refer to [Viewing the Prometheus Targets.](#viewing-the-prometheus-targets) - -To see the default dashboards, go to the [Grafana UI.](#grafana-ui) In the left navigation bar, click the icon with four boxes and click **Manage.** - -### Next Steps - -To configure Prometheus resources from the Rancher UI, click **Apps & Marketplace > Monitoring** in the upper left corner. - -# Using Monitoring - -Installing `rancher-monitoring` makes the following dashboards available from the Rancher UI. - -> **Note:** If you want to set up Alertmanager, Grafana or Ingress, it has to be done with the settings on the Helm chart deployment. It's problematic to create Ingress outside the deployment. - -### Grafana UI - -[Grafana](https://grafana.com/grafana/) allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. 
-
-Rancher allows any users who are authenticated by Kubernetes and have access to the Grafana service deployed by the Rancher Monitoring chart to access Grafana via the Rancher Dashboard UI. By default, all users who are able to access Grafana are given the [Viewer](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#viewer-role) role, which allows them to view any of the default dashboards deployed by Rancher.
-
-However, users can choose to log in to Grafana as an [Admin](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#admin-role) if necessary. The default Admin username and password for the Grafana instance will be `admin`/`prom-operator`, but alternative credentials can also be supplied when deploying or upgrading the chart.
-
-> **Persistent Dashboards:** To allow the Grafana dashboard to persist after it restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD-based approach. This allows the dashboard to be put under version control. For details, refer to [this section.](./persist-grafana)
-
-To see the Grafana UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Grafana.**
-
-
-![Cluster Compute Resources Dashboard in Grafana]({{}}/img/rancher/cluster-compute-resources-dashboard.png) - -
-![Default Dashboards in Grafana]({{}}/img/rancher/grafana-default-dashboard.png) - -### Prometheus UI - -To see the Prometheus UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Graph.** - -
-![Prometheus Graph UI]({{}}/img/rancher/prometheus-graph-ui.png) - -### Viewing the Prometheus Targets - -To see the Prometheus Targets, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Targets.** - -
-![Prometheus Targets UI]({{}}/img/rancher/prometheus-targets-ui.png) - -### Viewing the PrometheusRules - -To see the PrometheusRules, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Rules.** - -
-![PrometheusRules UI]({{}}/img/rancher/prometheus-rules-ui.png) - -For more information on PrometheusRules in Rancher, see [this page.](./configuration/prometheusrules) - -### Viewing Active Alerts in Alertmanager - -When `rancher-monitoring` is installed, the Prometheus Alertmanager UI is deployed. - -The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping, and routing them to the correct receiver integration such as email, PagerDuty, or OpsGenie. It also takes care of silencing and inhibition of alerts. - -In the Alertmanager UI, you can view your alerts and the current Alertmanager configuration. - -To see the PrometheusRules, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Alertmanager.** - -**Result:** The Alertmanager UI opens in a new tab. For help with configuration, refer to the [official Alertmanager documentation.](https://prometheus.io/docs/alerting/latest/alertmanager/) - -For more information on configuring Alertmanager in Rancher, see [this page.](./configuration/alertmanager) - -
-![Alertmanager UI]({{}}/img/rancher/alertmanager-ui.png)
-
-# Uninstall Monitoring
-
-1. From the **Cluster Explorer,** click **Apps & Marketplace.**
-1. Click **Installed Apps.**
-1. Go to the `cattle-monitoring-system` namespace and check the boxes for `rancher-monitoring-crd` and `rancher-monitoring`.
-1. Click **Delete.**
-1. Confirm **Delete.**
-
-**Result:** `rancher-monitoring` is uninstalled.
-
-# Setting Resource Limits and Requests
-
-The resource requests and limits can be configured when installing `rancher-monitoring`.
-
-The default values are in the [values.yaml](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/values.yaml) in the `rancher-monitoring` Helm chart.
-
-The default values in the table below are the minimum required resource limits and requests.
-
-| Resource Name | Memory Limit | CPU Limit | Memory Request | CPU Request |
-| ------------- | ------------ | ----------- | ---------------- | ------------------ |
-| alertmanager | 500Mi | 1000m | 100Mi | 100m |
-| grafana | 200Mi | 200m | 100Mi | 100m |
-| kube-state-metrics subchart | 200Mi | 100m | 130Mi | 100m |
-| prometheus-node-exporter subchart | 50Mi | 200m | 30Mi | 100m |
-| prometheusOperator | 500Mi | 200m | 100Mi | 100m |
-| prometheus | 2500Mi | 1000m | 1750Mi | 750m |
-| **Total** | **3950Mi** | **2700m** | **2210Mi** | **1250m** |
-
-At least 50Gi storage is recommended.
-
-# Known Issues
-
-There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more memory than the defaults provide. If you are enabling monitoring on a K3s cluster, we recommend setting `prometheus.prometheusSpec.resources.memory.limit` to 2500Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750Mi.
diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/_index.md
deleted file mode 100644
index b469666ba..000000000
--- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/_index.md
+++ /dev/null
@@ -1,96 +0,0 @@
----
-title: Configuration
-weight: 3
-aliases:
-  - /rancher/v2.x/en/monitoring-alerting/configuration
----
-
-This page captures some of the most important options for configuring the custom resources for monitoring.
-
-For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration.
- -- [Configuring Prometheus](#configuring-prometheus) -- [Configuring Targets with ServiceMonitors and PodMonitors](#configuring-targets-with-servicemonitors-and-podmonitors) - - [ServiceMonitors](#servicemonitors) - - [PodMonitors](#podmonitors) -- [PrometheusRules](#prometheusrules) -- [Alertmanager Config](#alertmanager-config) -- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) -- [Additional Scrape Configurations](#additional-scrape-configurations) -- [Examples](#examples) - -# Configuring Prometheus - -The primary way that users will be able to customize this feature for specific Monitoring and Alerting use cases is by creating and/or modifying ConfigMaps, Secrets, and Custom Resources pertaining to this deployment. - -Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. - -Prometheus Operator will automatically update your Prometheus configuration based on the live state of these custom resources. - -There are also certain special types of ConfigMaps/Secrets such as those corresponding to Grafana Dashboards, Grafana Datasources, and Alertmanager Configs that will automatically update your Prometheus configuration via sidecar proxies that observe the live state of those resources within your cluster. - -By default, a set of these resources (curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project) are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring / Alerting stack. For more information how to configure custom targets, alerts, notifiers, and dashboards after deploying the chart, see below. - -# Configuring Targets with ServiceMonitors and PodMonitors - -Customizing the scrape configuration used by Prometheus to determine which resources to scrape metrics from will primarily involve creating / modifying the following resources within your cluster: - -### ServiceMonitors - -This CRD declaratively specifies how groups of Kubernetes services should be monitored. Any Services in your cluster that match the labels located within the ServiceMonitor `selector` field will be monitored based on the `endpoints` specified on the ServiceMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) provided by Prometheus Operator. - -For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) - -### PodMonitors - -This CRD declaratively specifies how group of pods should be monitored. Any Pods in your cluster that match the labels located within the PodMonitor `selector` field will be monitored based on the `podMetricsEndpoints` specified on the PodMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitorspec) provided by Prometheus Operator. - -# PrometheusRules - -This CRD defines a group of Prometheus alerting and/or recording rules. 
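
Before following the link below, it may help to see the shape of such a resource. The following is a minimal sketch only: the rule and group names, the threshold, and the `team: front-end` label are illustrative, the namespace assumes the chart's default `cattle-monitoring-system`, the expression is a standard node-exporter CPU query, and whether Prometheus actually loads the resource depends on the `ruleSelector` configured on the Prometheus resource (see the examples section on this page).

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  # Illustrative name; cattle-monitoring-system is where the chart installs by default.
  name: example-rules
  namespace: cattle-monitoring-system
spec:
  groups:
  - name: example.rules
    rules:
    # A recording rule precomputes an expression and stores it as a new time series.
    - record: cluster:cpu_utilization:avg_irate5m
      expr: 1 - avg(irate(node_cpu_seconds_total{mode="idle"}[5m]))
    # An alerting rule fires after the expression has been true for the given duration.
    - alert: ExampleClusterCPUHigh
      expr: 1 - avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) > 0.9
      for: 10m
      labels:
        severity: warning
        team: front-end   # hypothetical label that an Alertmanager Route could match on
      annotations:
        summary: Cluster CPU utilization has been above 90% for 10 minutes.
```

Labels such as `team: front-end` on the rule are what Alertmanager Routes match on when deciding where to send the resulting alert, as described on the Alertmanager page.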
- -For information on configuring PrometheusRules, refer to [this page.](./prometheusrules) - -# Alertmanager Config - -For information on configuring the Alertmanager, refer to [this page.](./alertmanager) - -# Trusted CA for Notifiers - -If you need to add a trusted CA to your notifier, follow these steps: - -1. Create the `cattle-monitoring-system` namespace. -1. Add your trusted CA secret to the `cattle-monitoring-system` namespace. -1. Deploy or upgrade the `rancher-monitoring` Helm chart. In the chart options, reference the secret in **Alerting > Additional Secrets.** - -**Result:** The default Alertmanager custom resource will have access to your trusted CA. - -# Additional Scrape Configurations - -If the scrape configuration you want cannot be specified via a ServiceMonitor or PodMonitor at the moment, you can provide an `additionalScrapeConfigSecret` on deploying or upgrading `rancher-monitoring`. - -A [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) specifies a set of targets and parameters describing how to scrape them. In the general case, one scrape configuration specifies a single job. - -An example of where this might be used is with Istio. For more information, see [this section.](https://rancher.com/docs/rancher/v2.x/en/istio/v2.5/configuration-reference/selectors-and-scrape) - -# Examples - -### ServiceMonitor - -An example ServiceMonitor custom resource can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) - -### PodMonitor - -An example PodMonitor can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml) An example Prometheus resource that refers to it can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml) - -### PrometheusRule - -For users who are familiar with Prometheus, a PrometheusRule contains the alerting and recording rules that you would normally place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). - -For a more fine-grained application of PrometheusRules within your cluster, the ruleSelector field on a Prometheus resource allows you to select which PrometheusRules should be loaded onto Prometheus based on the labels attached to the PrometheusRules resources. - -An example PrometheusRule is on [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) - -### Alertmanager Config - -For an example configuration, refer to [this section.](./alertmanager/#example-alertmanager-config) \ No newline at end of file diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/_index.md deleted file mode 100644 index 9b8ef4cf9..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/_index.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: Alertmanager -weight: 1 ---- - -The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus. 
- -- [Overview](#overview) - - [Connecting Routes and PrometheusRules](#connecting-routes-and-prometheusrules) -- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui) -- [Receiver Configuration](#receiver-configuration) - - [Slack](#slack) - - [Email](#email) - - [PagerDuty](#pagerduty) - - [Opsgenie](#opsgenie) - - [Webhook](#webhook) - - [Custom](#custom) -- [Route Configuration](#route-configuration) - - [Receiver](#receiver) - - [Grouping](#grouping) - - [Matching](#matching) -- [Example Alertmanager Config](#example-alertmanager-config) -- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts) - -# Overview - -By default, Rancher Monitoring deploys a single Alertmanager onto a cluster that uses a default Alertmanager Config Secret. As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster that can all be managed using the same underlying Alertmanager Config Secret. - -This Secret should be updated or modified any time you want to: - -- Add in new notifiers or receivers -- Change the alerts that should be sent to specific notifiers or receivers -- Change the group of alerts that are sent out - -> By default, you can either choose to supply an existing Alertmanager Config Secret (i.e. any Secret in the `cattle-monitoring-system` namespace) or allow Rancher Monitoring to deploy a default Alertmanager Config Secret onto your cluster. By default, the Alertmanager Config Secret created by Rancher will never be modified / deleted on an upgrade / uninstall of the `rancher-monitoring` chart to prevent users from losing or overwriting their alerting configuration when executing operations on the chart. - -For more information on what fields can be specified in this secret, please look at the [Prometheus Alertmanager docs.](https://prometheus.io/docs/alerting/latest/alertmanager/) - -The full spec for the Alertmanager configuration file and what it takes in can be found [here.](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) - -For more information, refer to the [official Prometheus documentation about configuring routes.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) - -### Connecting Routes and PrometheusRules - -When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Prometheus to figure out which Route should receive this Alert. For example, an Alert with the label `team: front-end` will be sent to all Routes that match on that label. - -# Creating Receivers in the Rancher UI -_Available as of v2.5.4_ - -> **Prerequisites:** -> ->- The monitoring application needs to be installed. ->- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise you will only be able to make changes based on modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to what kinds of Alertmanager Configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement. - -To create notification receivers in the Rancher UI, - -1. Click **Cluster Explorer > Monitoring** and click **Receiver.** -2. 
Enter a name for the receiver. -3. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. -4. Click **Create.** - -**Result:** Alerts can be configured to send notifications to the receiver(s). - -# Receiver Configuration - -The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) - -Rancher v2.5.4 introduced the capability to configure receivers by filling out forms in the Rancher UI. - -{{% tabs %}} -{{% tab "Rancher v2.5.4+" %}} - -The following types of receivers can be configured in the Rancher UI: - -- Slack -- Email -- PagerDuty -- Opsgenie -- Webhook -- Custom - -The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. - -### Slack - -| Field | Type | Description | -|------|--------------|------| -| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | -| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | -| Proxy URL | String | Proxy for the webhook notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Email - -| Field | Type | Description | -|------|--------------|------| -| Default Recipient Address | String | The email address that will receive notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -SMTP options: - -| Field | Type | Description | -|------|--------------|------| -| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | -| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | -| Use TLS | Bool | Use TLS for encryption. | -| Username | String | Enter a username to authenticate with the SMTP server. | -| Password | String | Enter a password to authenticate with the SMTP server. | - -### PagerDuty - -| Field | Type | Description | -|------|------|-------| -| Integration Type | String | `Events API v2` or `Prometheus`. | -| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | -| Proxy URL | String | Proxy for the PagerDuty notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Opsgenie - -| Field | Description | -|------|-------------| -| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | -| Proxy URL | Proxy for the Opsgenie notifications. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -Opsgenie Responders: - -| Field | Type | Description | -|-------|------|--------| -| Type | String | Schedule, Team, User, or Escalation. 
For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | -| Send To | String | Id, Name, or Username of the Opsgenie recipient. | - -### Webhook - -| Field | Description | -|-------|--------------| -| URL | Webhook URL for the app of your choice. | -| Proxy URL | Proxy for the webhook notification. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Custom - -The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. - -{{% /tab %}} -{{% tab "Rancher v2.5.0-2.5.3" %}} -The Alertmanager must be configured in YAML, as shown in this [example.](#example-alertmanager-config) -{{% /tab %}} -{{% /tabs %}} - - -# Route Configuration - -{{% tabs %}} -{{% tab "Rancher v2.5.4+" %}} - -### Receiver -The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. - -### Grouping - -| Field | Default | Description | -|-------|--------------|---------| -| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | -| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | -| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | -| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | - -### Matching - -The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: - -```yaml -match: - [ : , ... ] -``` - -The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: - -```yaml -match_re: - [ : , ... ] -``` - -{{% /tab %}} -{{% tab "Rancher v2.5.0-2.5.3" %}} -The Alertmanager must be configured in YAML, as shown in this [example.](#example-alertmanager-config) -{{% /tab %}} -{{% /tabs %}} - -# Example Alertmanager Config - -To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: - -```yaml -route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 3h - receiver: 'slack-notifications' -receivers: -- name: 'slack-notifications' - slack_configs: - - send_resolved: true - text: '{{ template "slack.rancher.text" . 
}}' - api_url: -templates: -- /etc/alertmanager/config/*.tmpl -``` - -# Example Route Config for CIS Scan Alerts - -While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. - -For example, the following example route configuration could be used with a Slack receiver named `test-cis`: - -```yaml -spec: - receiver: test-cis - group_by: -# - string - group_wait: 30s - group_interval: 30s - repeat_interval: 30s - match: - job: rancher-cis-scan -# key: string - match_re: - {} -# key: string -``` - -For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.]({{}}/rancher/v2.x/en/cis-scans/v2.5/#enabling-alerting-for-rancher-cis-benchmark) diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/_index.md deleted file mode 100644 index 9203aea75..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/_index.md +++ /dev/null @@ -1,432 +0,0 @@ ---- -title: Prometheus Expressions -weight: 4 -aliases: - - /rancher/v2.x/en/project-admin/tools/monitoring/expression - - /rancher/v2.x/en/cluster-admin/tools/monitoring/expression - - /rancher/v2.x/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression ---- - -The PromQL expressions in this doc can be used to configure alerts. - -For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) - - - -- [Cluster Metrics](#cluster-metrics) - - [Cluster CPU Utilization](#cluster-cpu-utilization) - - [Cluster Load Average](#cluster-load-average) - - [Cluster Memory Utilization](#cluster-memory-utilization) - - [Cluster Disk Utilization](#cluster-disk-utilization) - - [Cluster Disk I/O](#cluster-disk-i-o) - - [Cluster Network Packets](#cluster-network-packets) - - [Cluster Network I/O](#cluster-network-i-o) -- [Node Metrics](#node-metrics) - - [Node CPU Utilization](#node-cpu-utilization) - - [Node Load Average](#node-load-average) - - [Node Memory Utilization](#node-memory-utilization) - - [Node Disk Utilization](#node-disk-utilization) - - [Node Disk I/O](#node-disk-i-o) - - [Node Network Packets](#node-network-packets) - - [Node Network I/O](#node-network-i-o) -- [Etcd Metrics](#etcd-metrics) - - [Etcd Has a Leader](#etcd-has-a-leader) - - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) - - [Number of Failed Proposals](#number-of-failed-proposals) - - [GRPC Client Traffic](#grpc-client-traffic) - - [Peer Traffic](#peer-traffic) - - [DB Size](#db-size) - - [Active Streams](#active-streams) - - [Raft Proposals](#raft-proposals) - - [RPC Rate](#rpc-rate) - - [Disk Operations](#disk-operations) - - [Disk Sync Duration](#disk-sync-duration) -- [Kubernetes Components Metrics](#kubernetes-components-metrics) - - [API Server Request Latency](#api-server-request-latency) - - [API Server Request Rate](#api-server-request-rate) - - [Scheduling Failed Pods](#scheduling-failed-pods) - - [Controller Manager Queue Depth](#controller-manager-queue-depth) - - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) - - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) - - [Ingress Controller Connections](#ingress-controller-connections) - - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) -- [Rancher Logging 
Metrics](#rancher-logging-metrics) - - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) - - [Fluentd Input Rate](#fluentd-input-rate) - - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) - - [Fluentd Output Rate](#fluentd-output-rate) -- [Workload Metrics](#workload-metrics) - - [Workload CPU Utilization](#workload-cpu-utilization) - - [Workload Memory Utilization](#workload-memory-utilization) - - [Workload Network Packets](#workload-network-packets) - - [Workload Network I/O](#workload-network-i-o) - - [Workload Disk I/O](#workload-disk-i-o) -- [Pod Metrics](#pod-metrics) - - [Pod CPU Utilization](#pod-cpu-utilization) - - [Pod Memory Utilization](#pod-memory-utilization) - - [Pod Network Packets](#pod-network-packets) - - [Pod Network I/O](#pod-network-i-o) - - [Pod Disk I/O](#pod-disk-i-o) -- [Container Metrics](#container-metrics) - - [Container CPU Utilization](#container-cpu-utilization) - - [Container Memory Utilization](#container-memory-utilization) - - [Container Disk I/O](#container-disk-i-o) - - - -# Cluster Metrics - -### Cluster CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | - -### Cluster Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1: `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`<br>load5: `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`<br>load15: `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)` |
-| Summary | load1: `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`<br>load5: `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`<br>load15: `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
| - -### Cluster Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | - -### Cluster Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | - -### Cluster Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read: `sum(rate(node_disk_read_bytes_total[5m])) by (instance)`<br>written: `sum(rate(node_disk_written_bytes_total[5m])) by (instance)` |
-| Summary | read: `sum(rate(node_disk_read_bytes_total[5m]))`<br>written: `sum(rate(node_disk_written_bytes_total[5m]))`
| - -### Cluster Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
-| Summary | receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| - -### Cluster Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
-| Summary | receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| - -# Node Metrics - -### Node CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | - -### Node Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1: `sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br>load5: `sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br>load15: `sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})` |
-| Summary | load1: `sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br>load5: `sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br>load15: `sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| - -### Node Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | - -### Node Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | - -### Node Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| - -### Node Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
| - -### Node Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
| - -# Etcd Metrics - -### Etcd Has a Leader - -`max(etcd_server_has_leader)` - -### Number of Times the Leader Changes - -`max(etcd_server_leader_changes_seen_total)` - -### Number of Failed Proposals - -`sum(etcd_server_proposals_failed_total)` - -### GRPC Client Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| - -### Peer Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| - -### DB Size - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | -| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | - -### Active Streams - -| Catalog | Expression | -| --- | --- | -| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| -| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| - -### Raft Proposals - -| Catalog | Expression | -| --- | --- | -| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| -| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| - -### RPC Rate - -| Catalog | Expression | -| --- | --- | -| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| -| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| - -### Disk Operations - -| Catalog | Expression | -| --- | --- | -| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| -| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| - -### Disk Sync Duration - -| Catalog | Expression | -| --- | --- | -| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| -| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| - -# Kubernetes Components Metrics - -### API Server Request Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | -| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | - -### API Server Request Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | -| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | - -### Scheduling Failed Pods - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | -| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | - -### Controller Manager Queue Depth - -| Catalog | Expression | -| --- | --- | -| Detail |
volumes`sum(volumes_depth) by (instance)`
deployment`sum(deployment_depth) by (instance)`
replicaset`sum(replicaset_depth) by (instance)`
service`sum(service_depth) by (instance)`
serviceaccount`sum(serviceaccount_depth) by (instance)`
endpoint`sum(endpoint_depth) by (instance)`
daemonset`sum(daemonset_depth) by (instance)`
statefulset`sum(statefulset_depth) by (instance)`
replicationmanager`sum(replicationmanager_depth) by (instance)`
| -| Summary |
volumes`sum(volumes_depth)`
deployment`sum(deployment_depth)`
replicaset`sum(replicaset_depth)`
service`sum(service_depth)`
serviceaccount`sum(serviceaccount_depth)`
endpoint`sum(endpoint_depth)`
daemonset`sum(daemonset_depth)`
statefulset`sum(statefulset_depth)`
replicationmanager`sum(replicationmanager_depth)`
| - -### Scheduler E2E Scheduling Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | -| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | - -### Scheduler Preemption Attempts - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | -| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | - -### Ingress Controller Connections - -| Catalog | Expression | -| --- | --- | -| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| -| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - -### Ingress Controller Request Process Time - -| Catalog | Expression | -| --- | --- | -| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | -| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | - -# Rancher Logging Metrics - - -### Fluentd Buffer Queue Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | - -### Fluentd Input Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | - -### Fluentd Output Errors Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | -| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | - -### Fluentd Output Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | - -# Workload Metrics - -### Workload CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | -| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | - -### Workload Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -# Pod Metrics - -### Pod CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| - -### Pod Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | -| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | - -### Pod Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -# Container Metrics - -### Container CPU Utilization - -| Catalog | Expression | -| --- | --- | -| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | - -### Container Memory Utilization - -`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` - -### Container Disk I/O - -| Catalog | Expression | -| --- | --- | -| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/_index.md deleted file mode 100644 index e30a3e6dc..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: PrometheusRules -weight: 2 ---- - -A PrometheusRule defines a group of Prometheus alerting and/or recording rules. - -- [About PrometheusRule Custom Resources](#about-prometheusrule-custom-resources) -- [Connecting Routes and PrometheusRules](#connecting-routes-and-prometheusrules) -- [Creating PrometheusRules in the Rancher UI](#creating-prometheusrules-in-the-rancher-ui) -- [Configuration](#configuration) - - [Rule Group](#rule-group) - - [Alerting Rules](#alerting-rules) - - [Recording Rules](#recording-rules) - -### About PrometheusRule Custom Resources - -Prometheus rule files are held in PrometheusRule custom resources. - -A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: - -- The name of the new alert or record -- A PromQL (Prometheus query language) expression for the new alert or record -- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) -- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. - -Alerting rules define alert conditions based on PromQL queries. Recording rules precompute frequently needed or computationally expensive queries at defined intervals. - -For more information on what fields can be specified, please look at the [Prometheus Operator spec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec) - -Use the label selector field `ruleSelector` in the Prometheus object to define the rule files that you want to be mounted into Prometheus. 
- -For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - -### Connecting Routes and PrometheusRules - -When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Prometheus to figure out which Route should receive this Alert. For example, an Alert with the label `team: front-end` will be sent to all Routes that match on that label. - -### Creating PrometheusRules in the Rancher UI - -_Available as of v2.5.4_ - -> **Prerequisite:** The monitoring application needs to be installed. - -To create rule groups in the Rancher UI, - -1. Click **Cluster Explorer > Monitoring** and click **Prometheus Rules.** -1. Click **Create.** -1. Enter a **Group Name.** -1. Configure the rules. In Rancher's UI, we expect a rule group to contain either alert rules or recording rules, but not both. For help filling out the forms, refer to the configuration options below. -1. Click **Create.** - -**Result:** Alerts can be configured to send notifications to the receiver(s). - -# Configuration - -{{% tabs %}} -{{% tab "Rancher v2.5.4" %}} -Rancher v2.5.4 introduced the capability to configure PrometheusRules by filling out forms in the Rancher UI. - - -### Rule Group - -| Field | Description | -|-------|----------------| -| Group Name | The name of the group. Must be unique within a rules file. | -| Override Group Interval | Duration in seconds for how often rules in the group are evaluated. | - - -### Alerting Rules - -[Alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) allow you to define alert conditions based on PromQL (Prometheus Query Language) expressions and to send notifications about firing alerts to an external service. - -| Field | Description | -|-------|----------------| -| Alert Name | The name of the alert. Must be a valid label value. | -| Wait To Fire For | Duration in seconds. Alerts are considered firing once they have been returned for this long. Alerts which have not yet fired for long enough are considered pending. | -| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and all resultant time series will become pending/firing alerts. For more information, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../expression) | -| Labels | Labels to add or overwrite for each alert. | -| Severity | When enabled, labels are attached to the alert or record that identify it by the severity level. | -| Severity Label Value | Critical, warning, or none | -| Annotations | Annotations are a set of informational labels that can be used to store longer additional information, such as alert descriptions or runbook links. A [runbook](https://en.wikipedia.org/wiki/Runbook) is a set of documentation about how to handle alerts. 
The annotation values can be [templated.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#templating) | - -### Recording Rules - -[Recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) allow you to precompute frequently needed or computationally expensive PromQL (Prometheus Query Language) expressions and save their result as a new set of time series. - -| Field | Description | -|-------|----------------| -| Time Series Name | The name of the time series to output to. Must be a valid metric name. | -| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and the result will be recorded as a new set of time series with the metric name as given by 'record'. For more information about expressions, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../expression) | -| Labels | Labels to add or overwrite before storing the result. | - -{{% /tab %}} -{{% tab "Rancher v2.5.0-v2.5.3" %}} -For Rancher v2.5.0-v2.5.3, PrometheusRules must be configured in YAML. For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/migrating/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/migrating/_index.md deleted file mode 100644 index 3d1a5bd98..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/migrating/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Migrating to Rancher v2.5 Monitoring -weight: 5 -aliases: - - /rancher/v2.x/en/monitoring-alerting/migrating ---- - -If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no automatic upgrade path for switching to the new monitoring/alerting solution. Before deploying the new monitoring solution via Cluster Explore, you will need to disable and remove all existing custom alerts, notifiers and monitoring installations for the whole cluster and in all projects. - -### Monitoring Before Rancher v2.5 - -As of v2.2.0, Rancher's Cluster Manager allowed users to enable Monitoring & Alerting V1 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) independently within a cluster. For more information on how to configure Monitoring & Alerting V1, see the [docs about monitoring before Rancher v2.5]({{}}/rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x). - -When Monitoring is enabled, Monitoring V1 deploys [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/docs/grafana/latest/getting-started/what-is-grafana/) onto a cluster to monitor the state of processes of your cluster nodes, Kubernetes components, and software deployments and create custom dashboards to make it easy to visualize collected metrics. - -Monitoring V1 could be configured on both a cluster-level and on a project-level and would automatically scrape certain workloads deployed as Apps on the Rancher cluster. 
- -When Alerts or Notifiers are enabled, Alerting V1 deploys [Prometheus Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) and a set of Rancher controllers onto a cluster that allows users to define alerts and configure alert-based notifications via Email, Slack, PagerDuty, etc. Users can choose to create different types of alerts depending on what needs to be monitored (e.g. System Services, Resources, CIS Scans, etc.); however, PromQL Expression-based alerts can only be created if Monitoring V1 is enabled. - -### Monitoring/Alerting via Cluster Explorer in Rancher 2.5 - -As of v2.5.0, Rancher's Cluster Explorer now allows users to enable Monitoring & Alerting V2 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) together within a cluster. - -Unlike in Monitoring & Alerting V1, both features are packaged in a single Helm chart found [here](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring). The behavior of this chart and configurable fields closely matches [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), a Prometheus Community Helm chart, and any deviations from the upstream chart can be found in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md) maintained with the chart. - -Monitoring V2 can only be configured on the cluster level. Project-level monitoring and alerting is no longer supported. - -For more information on how to configure Monitoring & Alerting V2, see [this page.]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration) - -### Changes to Role-based Access Control - -Project owners and members no longer get access to Grafana or Prometheus by default. If view-only users had access to Grafana, they would be able to see data from any namespace. For Kiali, any user can edit things they don’t own in any namespace. - -For more information about role-based access control in `rancher-monitoring`, refer to [this page.](../rbac) - -### Migrating from Monitoring V1 to Monitoring V2 - -While there is no automatic migration available, it is possible to manually migrate custom Grafana dashboards and alerts that were created in Monitoring V1 to Monitoring V2. - -Before you can install Monitoring V2, Monitoring V1 needs to be uninstalled completely. In order to uninstall Monitoring V1: - -* Remove all cluster and project specific alerts and alerts groups -* Remove all notifiers -* Disable all project monitoring installations under Cluster -> Project -> Tools -> Monitoring -* Ensure that all project-monitoring apps in all projects have been removed and are not recreated after a few minutes -* Disable the cluster monitoring installation under Cluster -> Tools -> Monitoring -* Ensure that the cluster-monitoring app and the monitoring-operator app in the System project have been removed and are not recreated after a few minutes - -#### Migrating Grafana Dashboards - -You can migrate any dashboard added to Grafana in Monitoring V1 to Monitoring V2. In Monitoring V1 you can export an existing dashboard like this: - -* Sign into Grafana -* Navigate to the dashboard you want to export -* Go to the dashboard settings -* Copy the [JSON Model](https://grafana.com/docs/grafana/latest/dashboards/json-model/) - -In the JSON Model, change all `datasource` fields from `RANCHER_MONITORING` to `Prometheus`. 
You can easily do this by replacing all occurrences of `"datasource": "RANCHER_MONITORING"` with `"datasource": "Prometheus"`. - -If Grafana is backed by a persistent volume, you can now [import](https://grafana.com/docs/grafana/latest/dashboards/export-import/) this JSON Model into the Monitoring V2 Grafana UI. -It is recommended to provide the dashboard to Grafana with a ConfigMap in the `cattle-dashboards` namespace that has the label `grafana_dashboard: "1"`: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: custom-dashboard - namespace: cattle-dashboards - labels: - grafana_dashboard: "1" -data: - custom-dashboard.json: | - { - ... - } -``` - -Once this ConfigMap is created, the dashboard will automatically be added to Grafana. - -#### Migrating Alerts - -It is only possible to directly migrate expression-based alerts to Monitoring V2. Fortunately, the event-based alerts that could be set up to alert on system component, node or workload events, are already covered out-of-the-box by the alerts that are part of Monitoring V2. So it is not necessary to migrate them. - -To migrate the following expression alert - -{{< img "/img/rancher/monitoring/migration/alert_2.4_to_2.5_source.png" "">}} - -you have to either create a PrometheusRule configuration like this in any namespace - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: custom-rules - namespace: default -spec: - groups: - - name: custom.rules - rules: - - alert: Custom Expression Alert - expr: prometheus_query > 5 - for: 5m - labels: - severity: critical - annotations: - summary: "The result of prometheus_query has been larger than 5 for 5m. Current value {{ $value }}" -``` - -or add the Prometheus Rule through the Cluster Explorer - -{{< img "/img/rancher/monitoring/migration/alert_2.4_to_2.5_target.png" "">}} - -For more details on how to configure PrometheusRules in Monitoring V2 see [Monitoring Configuration]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration#prometheusrules). - -#### Migrating notifiers - -There is no direct equivalent for how notifiers work in Monitoring V1. Instead you have to replicate the desired setup with [Routes and Receivers]({{}}/rancher/v2.x/en/monitoring-alerting/v2.5/configuration#alertmanager-config) in Monitoring V2. diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/_index.md deleted file mode 100644 index d0d8823ca..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Persistent Grafana Dashboards -weight: 4 ---- - -To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - -> **Prerequisites:** -> -> - The monitoring application needs to be installed. -> - You must have the cluster-admin ClusterRole permission. - -1. Open the Grafana dashboard. From the **Cluster Explorer,** click **Cluster Explorer > Monitoring.** -1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. (Regardless of who has the password, cluster administrator permission in Rancher is still required access the Grafana instance.) 
Alternative credentials can also be supplied on deploying or upgrading the chart. -1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. -1. In the left navigation menu, click **JSON Model.** -1. Copy the JSON data structure that appears. -1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. Paste the JSON into the ConfigMap in the format shown in the example below: - - ```yaml - apiVersion: v1 - kind: ConfigMap - metadata: - labels: - grafana_dashboard: "1" - name: - namespace: cattle-dashboards - data: - .json: |- - - ``` - -**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. - -Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. diff --git a/content/rancher/v2.x/en/monitoring-alerting/v2.5/rbac/_index.md b/content/rancher/v2.x/en/monitoring-alerting/v2.5/rbac/_index.md deleted file mode 100644 index df44189e6..000000000 --- a/content/rancher/v2.x/en/monitoring-alerting/v2.5/rbac/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: RBAC -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/monitoring/rbac - - /rancher/v2.x/en/monitoring-alerting/rbac ---- -This section describes the expectations for RBAC for Rancher Monitoring. - -## Cluster Admins - -By default, only those with the cluster-admin `ClusterRole` should be able to: - -- Install the `rancher-monitoring` App onto a cluster and all other relevant configuration performed on the chart deploy - - e.g. whether default dashboards are created, what exporters are deployed onto the cluster to collect metrics, etc. -- Create / modify / delete Prometheus deployments in the cluster via Prometheus CRs -- Create / modify / delete Alertmanager deployments in the cluster via Alertmanager CRs -- Persist new Grafana dashboards or datasources via creating ConfigMaps in the appropriate namespace -- Expose certain Prometheus metrics to the k8s Custom Metrics API for HPA via a Secret in the `cattle-monitoring-system` namespace - -## Users with k8s ClusterRole-based Permissions - -The `rancher-monitoring` chart installs the following three `ClusterRoles`. By default, they aggregate into the corresponding k8s `ClusterRoles`: - -| ClusterRole | Aggregates To Default K8s ClusterRole | -| ------------------------------| ---------------------------| -| `monitoring-admin` | `admin`| -| `monitoring-edit` | `edit` | -| `monitoring-view` | `view ` | - -These `ClusterRoles` provide different levels of access to the Monitoring CRDs based on the actions that can be performed: - -| CRDs (monitoring.coreos.com) | Admin | Edit | View | -| ------------------------------| ---------------------------| ---------------------------| ---------------------------| -|
  • `prometheuses`
  • `alertmanagers`
| Get, List, Watch | Get, List, Watch | Get, List, Watch | -|
  • `servicemonitors`
  • `podmonitors`
  • `prometheusrules`
| * | * | Get, List, Watch | - -On a high level, the following permissions are assigned by default as a result. - -### Users with k8s Admin / Edit Permissions - -Only those with the the cluster-admin / admin / edit `ClusterRole` should be able to: - -- Modify the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs -- Modify the alerting / recording rules of a Prometheus deployment via PrometheusRules CRs - -### Users with k8s View Permissions - -Only those with who have some k8s `ClusterRole` should be able to: - -- View the configuration of Prometheuses that are deployed within the cluster -- View the configuration of Alertmanagers that are deployed within the cluster -- View the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs -- View the alerting / recording rules of a Prometheus deployment via PrometheusRules CRs - -## Additional Monitoring Roles - -Monitoring also creates six additional `Roles` that are not assigned to users by default but are created within the cluster. Admins should use these roles to provide more fine-grained access to users: - -| Role | Purpose | -| ------------------------------| ---------------------------| -| monitoring-config-admin | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-config-edit | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-config-view | Allow admins to assign roles to users to be able to view Secrets and ConfigMaps within the cattle-monitoring-system namespace. Viewing Secrets / ConfigMaps in this namespace could allow users to observe the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-dashboard-admin | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | -| monitoring-dashboard-edit | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | -| monitoring-dashboard-view | Allow admins to assign roles to users to be able to view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | - -## Users with Rancher Cluster Manager Based Permissions - -The relationship between the default roles deployed by Rancher Cluster Manager (i.e. cluster-owner, cluster-member, project-owner, project-member), the default k8s roles, and the roles deployed by the rancher-monitoring chart are detailed in the table below: - -| Cluster Manager Role | k8s Role | Monitoring ClusterRole / Role | ClusterRoleBinding or RoleBinding? 
| -| --------- | --------- | --------- | --------- | -| cluster-owner | cluster-admin | N/A | ClusterRoleBinding | -| cluster-member | admin | monitoring-admin | ClusterRoleBinding | -| project-owner | edit | monitoring-admin | RoleBinding within Project namespace | -| project-member | view | monitoring-edit | RoleBinding within Project namespace | - -### Differences in 2.5.x - -Users with the project-member or project-owners roles assigned will not be given access to either Prometheus or Grafana in Rancher 2.5.x since we only create Grafana or Prometheus on a cluster-level. - -In addition, while project owners will still be only able to add ServiceMonitors / PodMonitors that scrape resources within their project's namespace by default, PrometheusRules are not scoped to a single namespace / project. Therefore, any alert rules or recording rules created by project-owners within their project namespace will be applied across the entire cluster, although they will be unable to view / edit / delete any rules that were created outside the project's namespace. - -### Assigning Additional Access - -If cluster-admins would like to provide additional admin/edit access to users outside of the roles offered by the rancher-monitoring chart, the following table identifies the potential impact: - -|CRDs (monitoring.coreos.com) | Can it cause impact outside of a namespace / project? | Impact | -|----------------------------| ------| ----------------------------| -| `prometheuses`| Yes, this resource can scrape metrics from any targets across the entire cluster (unless the Operator itself is otherwise configured). | User will be able to define the configuration of new cluster-level Prometheus deployments that should be created in the cluster. | -| `alertmanagers`| No | User will be able to define the configuration of new cluster-level Alertmanager deployments that should be created in the cluster. Note: if you just want to allow users to configure settings like Routes and Receivers, you should just provide access to the Alertmanager Config Secret instead. | -|
  • `servicemonitors`
  • `podmonitors`
| No, not by default; this is configurable via `ignoreNamespaceSelectors` on the Prometheus CR. | User will be able to set up scrapes by Prometheus on endpoints exposed by Services / Pods within the namespace they are given this permission in. | -| `prometheusrules`| Yes, PrometheusRules are cluster-scoped. | User will be able to define alert or recording rules on Prometheus based on any series collected across the entire cluster. | - -| k8s Resources | Namespace | Can it cause impact outside of a namespace / project? | Impact | -|----------------------------| ------| ------| ----------------------------| -|
  • `secrets`
  • `configmaps`
| `cattle-monitoring-system` | Yes, Configs and Secrets in this namespace can impact the entire monitoring / alerting pipeline. | User will be able to create or edit Secrets / ConfigMaps such as the Alertmanager Config, Prometheus Adapter Config, TLS secrets, additional Grafana datasources, etc. This can have broad impact on all cluster monitoring / alerting. | -|
  • `secrets`
  • `configmaps`
| `cattle-dashboards` | Yes, Configs and Secrets in this namespace can create dashboards that make queries on all metrics collected at a cluster-level. | User will be able to create Secrets / ConfigMaps that persist new Grafana Dashboards only. | diff --git a/content/rancher/v2.x/en/opa-gatekeper/_index.md b/content/rancher/v2.x/en/opa-gatekeper/_index.md deleted file mode 100644 index d85647418..000000000 --- a/content/rancher/v2.x/en/opa-gatekeper/_index.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: OPA Gatekeeper -weight: 17 -aliases: - - /rancher/v2.x/en/cluster-admin/tools/opa-gatekeeper - - /rancher/v2.x/en/opa-gatekeper/Open%20Policy%20Agent ---- -_Available as of v2.4.0_ - -To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. [OPA (Open Policy Agent)](https://www.openpolicyagent.org/) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. - -OPA provides a high-level declarative language that lets you specify policy as code and ability to extend simple APIs to offload policy decision-making. - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a project that provides integration between OPA and Kubernetes. OPA Gatekeeper provides: - -- An extensible, parameterized policy library. -- Native Kubernetes CRDs for instantiating the policy library, also called “constraints." -- Native Kubernetes CRDs for extending the policy library, also called "constraint templates." -- Audit functionality. - -To read more about OPA, please refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/) - -# How the OPA Gatekeeper Integration Works - -Kubernetes provides the ability to extend API server functionality via admission controller webhooks, which are invoked whenever a resource is created, updated or deleted. Gatekeeper is installed as a validating webhook and enforces policies defined by Kubernetes custom resource definitions. In addition to the admission control usage, Gatekeeper provides the capability to audit existing resources in Kubernetes clusters and mark current violations of enabled policies. - -OPA Gatekeeper is made available via Rancher's Helm system chart, and it is installed in a namespace named `gatekeeper-system.` - -# Enabling OPA Gatekeeper in a Cluster - -In Rancher v2.5, the OPA Gatekeeper application was improved. The Rancher v2.4 feature can't be upgraded to the new version in Rancher v2.5. If you installed OPA Gatekeeper in Rancher v2.4, you will need to uninstall OPA Gatekeeper and its CRDs from the old UI, then reinstall it in Rancher v2.5. To uninstall the CRDs run the following command in the kubectl console `kubectl delete crd configs.config.gatekeeper.sh constrainttemplates.templates.gatekeeper.sh`. - -{{% tabs %}} -{{% tab "Rancher v2.5" %}} - -> **Prerequisite:** Only administrators and cluster owners can enable OPA Gatekeeper. - -OPA Gatekeeper can be installed from the new **Cluster Explorer** view in Rancher v2.5, or from the legacy cluster view. - -### Enabling OPA Gatekeeper from Cluster Explorer - -1. Go to the cluster view in the Rancher UI. Click **Cluster Explorer.** -1. Click **Apps** in the top navigation bar. -1. Click **OPA Gatekeeper.** -1. 
Click **Install.** - -**Result:** OPA Gatekeeper is deployed in your Kubernetes cluster. - -### Enabling OPA Gatekeeper from the Legacy Cluster View - -1. Go to the cluster view in the Rancher UI. -1. Click **Tools > OPA Gatekeeper.** -1. Click **Install.** - -**Result:** OPA Gatekeeper is deployed in your Kubernetes cluster. - -{{% /tab %}} -{{% tab "Rancher v2.4" %}} - -> **Prerequisites:** -> -> - Only administrators and cluster owners can enable OPA Gatekeeper. -> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) - -1. Navigate to the cluster's **Dashboard** view. -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. To install Gatekeeper with the default configuration, click on **Enable Gatekeeper (v0.1.0) with defaults.** -1. To change any default configuration, click on **Customize Gatekeeper yaml configuration.** -{{% /tab %}} -{{% /tabs %}} - -# Constraint Templates - -[Constraint templates](https://github.com/open-policy-agent/gatekeeper#constraint-templates) are Kubernetes custom resources that define the schema and Rego logic of the OPA policy to be applied by Gatekeeper. For more information on the Rego policy language, refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/policy-language/) - -When OPA Gatekeeper is enabled, Rancher installs some templates by default. - -To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates.** - -Rancher also provides the ability to create your own constraint templates by importing YAML definitions. - -# Creating and Configuring Constraints - -[Constraints](https://github.com/open-policy-agent/gatekeeper#constraints) are Kubernetes custom resources that define the scope of objects to which a specific constraint template applies to. The complete policy is defined by constraint templates and constraints together. - -> **Prerequisites:** OPA Gatekeeper must be enabled in the cluster. - -To list the constraints installed, go to the left side menu under OPA Gatekeeper, and click on **Constraints.** - -New constraints can be created from a constraint template. - -Rancher provides the ability to create a constraint by using a convenient form that lets you input the various constraint fields. - -The **Edit as yaml** option is also available to configure the the constraint's yaml definition. - -### Exempting Rancher's System Namespaces from Constraints - -When a constraint is created, ensure that it does not apply to any Rancher or Kubernetes system namespaces. If the system namespaces are not excluded, then it is possible to see many resources under them marked as violations of the constraint. - -To limit the scope of the constraint only to user namespaces, always specify these namespaces under the **Match** field of the constraint. - -Also, the constraint may interfere with other Rancher functionality and deny system workloads from being deployed. To avoid this, exclude all Rancher-specific namespaces from your constraints. - -# Enforcing Constraints in your Cluster - -When the **Enforcement Action** is **Deny,** the constraint is immediately enabled and will deny any requests that violate the policy defined. 
By default, the enforcement value is **Deny.** - -When the **Enforcement Action** is **Dryrun,** then any resources that violate the policy are only recorded under the constraint's status field. - -To enforce constraints, create a constraint using the form. In the **Enforcement Action** field, choose **Deny.** - -# Audit and Violations in your Cluster - -OPA Gatekeeper runs a periodic audit to check if any existing resource violates any enforced constraint. The audit-interval (default 300s) can be configured while installing Gatekeeper. - -On the Gatekeeper page, any violations of the defined constraints are listed. - -Also under **Constraints,** the number of violations of the constraint can be found. - -The detail view of each constraint lists information about the resource that violated the constraint. - -# Disabling Gatekeeper - -1. Navigate to the cluster's Dashboard view -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. Click the **⋮ > Disable**. - -**Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. - diff --git a/content/rancher/v2.x/en/overview/_index.md b/content/rancher/v2.x/en/overview/_index.md deleted file mode 100644 index e25fe0107..000000000 --- a/content/rancher/v2.x/en/overview/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Overview -weight: 1 ---- -Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. - -# Run Kubernetes Everywhere - -Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. - -# Meet IT requirements - -Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: - -- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. -- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. -- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. - -# Empower DevOps Teams - -Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. - -The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. - -![Platform]({{}}/img/rancher/platform.png) - -# Features of the Rancher API Server - -The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. 
It implements the following functionalities: - -### Authorization and Role-Based Access Control - -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.x/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.x/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) policies. - -### Working with Kubernetes - -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.x/en/catalog/) that make it easy to repeatedly deploy applications. -- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.x/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.x/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.x/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -### Working with Cloud Infrastructure - -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes/) in all clusters. -- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) in the cloud. - -### Cluster Visibility - -- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. -- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. -- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. - -# Editing Downstream Clusters with Rancher - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. 
- -After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.x/en/cluster-provisioning/cluster-capabilities-table" %}} diff --git a/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md deleted file mode 100644 index 0e902d018..000000000 --- a/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Architecture Recommendations -weight: 3 ---- - -Kubernetes cluster. If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) - -This section covers the following topics: - -- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) -- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) -- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) -- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) -- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) -- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) - -# Separation of Rancher and User Clusters - -A user cluster is a downstream Kubernetes cluster that runs your apps and services. - -If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. - -In Kubernetes installations of Rancher, the Rancher server cluster should also be separate from the user clusters. - -![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) - -# Why HA is Better for Rancher in Production - -We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. - -We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. - -Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)]({{}}/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)]({{}}/k3s/latest/en/) Kubernetes cluster. Both RKE and K3s are fully certified Kubernetes distributions. - -### K3s Kubernetes Cluster Installations - -If you are installing Rancher v2.4 for the first time, we recommend installing it on a K3s Kubernetes cluster. One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. 
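As a rough sketch of how the external datastore fits in (the endpoint and credentials below are placeholders, not an excerpt from the K3s docs), each K3s server node is pointed at the same external database; newer K3s releases can read these settings from a configuration file, and older releases accept the same values as CLI flags:

```yaml
# /etc/rancher/k3s/config.yaml on each K3s server node (sketch; values are placeholders).
# Equivalent CLI flags: --datastore-endpoint and --token.
datastore-endpoint: "mysql://k3s:password@tcp(db.example.internal:3306)/k3s"
token: "shared-cluster-secret"
```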
- -The option to install Rancher on a K3s cluster is a feature introduced in Rancher v2.4. K3s is easy to install, with half the memory of Kubernetes, all in a binary less than 100 MB. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### RKE Kubernetes Cluster Installations - -If you are installing Rancher before v2.4, you will need to install Rancher on an RKE cluster, in which the cluster data is stored on each node with the etcd role. As of Rancher v2.4, there is no migration path to transition the Rancher server from an RKE cluster to a K3s cluster. All versions of the Rancher server, including v2.4+, can be installed on an RKE cluster. - -In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Recommended Load Balancer Configuration for Kubernetes Installations - -We recommend the following configurations for the load balancer and Ingress controllers: - -* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -# Environment for Kubernetes Installations - -It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. - -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -# Recommended Node Roles for Kubernetes Installations - -Our recommendations for the roles of each node differ depending on whether Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. - -### K3s Cluster Roles - -In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. - -For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. - -### RKE Cluster Roles - -If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. - -### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters - -Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. - -Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. - -For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. - -![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) - -RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. - -We recommend that downstream user clusters should have at least: - -- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available -- **Two nodes with only the controlplane role** to make the master component highly available -- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services - -With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. 
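As an illustration of the layout described above, an RKE `cluster.yml` for the Rancher server cluster might look like the following sketch (node addresses and the SSH user are placeholders):

```yaml
# Sketch of an RKE cluster.yml for the Rancher server cluster; addresses and user are placeholders.
# Each of the three nodes carries all three roles, which is acceptable here because
# nothing other than Rancher itself runs on this cluster.
nodes:
  - address: 192.0.2.11
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 192.0.2.12
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 192.0.2.13
    user: ubuntu
    role: [controlplane, worker, etcd]
```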
- -Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. - -For more best practices for downstream clusters, refer to the [production checklist]({{}}/rancher/v2.x/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.x/en/best-practices/v2.5/) - -# Architecture for an Authorized Cluster Endpoint - -If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. - -If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.x/en/user-settings/api-keys/#creating-an-api-key) for more information. \ No newline at end of file diff --git a/content/rancher/v2.x/en/overview/architecture/_index.md b/content/rancher/v2.x/en/overview/architecture/_index.md deleted file mode 100644 index 8a9b84818..000000000 --- a/content/rancher/v2.x/en/overview/architecture/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. - -For information on the different ways that Rancher can be installed, refer to the [overview of installation options.]({{}}/rancher/v2.x/en/installation/#overview-of-installation-options) - -For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.x/en/overview/#features-of-the-rancher-api-server) - -For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.x/en/overview/architecture-recommendations) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. - -This section covers the following topics: - -- [Rancher server architecture](#rancher-server-architecture) -- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) - - [The authentication proxy](#1-the-authentication-proxy) - - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) - - [Node agents](#3-node-agents) - - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) -- [Important files](#important-files) -- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) -- [Rancher server components and source code](#rancher-server-components-and-source-code) - -# Rancher Server Architecture - -The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). 
- -For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. - -The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -You can install Rancher on a single node, or on a high-availability Kubernetes cluster. - -A high-availability Kubernetes installation is recommended for production. - -A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -- For Rancher v2.0-v2.4, there was no migration path from a Docker installation to a high-availability installation. Therefore, if you are using Rancher before v2.5, you may want to use a Kubernetes installation from the start. - -- For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.x/en/backups/v2.5/migrating-rancher/) - -The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. - -# Communicating with Downstream User Clusters - -This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. - -The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. - -
Communicating with Downstream Clusters
- -![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) - -The following descriptions correspond to the numbers in the diagram above: - -1. [The Authentication Proxy](#1-the-authentication-proxy) -2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) -3. [Node Agents](#3-node-agents) -4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) - -### 1. The Authentication Proxy - -In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see -the pods. Bob is authenticated through Rancher's authentication proxy. - -The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. - -Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. - -By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. - -### 2. Cluster Controllers and Cluster Agents - -Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. - -There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: - -- Watches for resource changes in the downstream cluster -- Brings the current state of the downstream cluster to the desired state -- Configures access control policies to clusters and projects -- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE - -By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. - -The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: - -- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters -- Manages workloads, pod creation and deployment within each cluster -- Applies the roles and bindings defined in each cluster's global policies -- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health - -### 3. Node Agents - -If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. - -The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. It is used to interact with the nodes when performing cluster operations. 
Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. - -### 4. Authorized Cluster Endpoint - -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. - -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -There are two main reasons why a user might need the authorized cluster endpoint: - -- To access a downstream user cluster while Rancher is down -- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. - -> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. - -With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. - -You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl) - -# Important Files - -The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. -- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
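To illustrate the extra context described in the authorized cluster endpoint section above, a kubeconfig file that Rancher generates for a downstream cluster is typically shaped roughly like the sketch below; the cluster name, URLs, and token are placeholders, and the exact layout can vary between Rancher versions:

```yaml
# Rough sketch of a Rancher-generated kubeconfig with an authorized cluster endpoint context.
# All names, URLs, and credentials are placeholders.
apiVersion: v1
kind: Config
clusters:
  - name: my-cluster                  # requests proxied through the Rancher server
    cluster:
      server: https://rancher.example.com/k8s/clusters/c-abc12
  - name: my-cluster-direct           # authorized cluster endpoint, bypasses the Rancher proxy
    cluster:
      server: https://cluster.example.com:6443
      certificate-authority-data: "<base64-encoded CA certificate>"
contexts:
  - name: my-cluster
    context: {cluster: my-cluster, user: my-cluster}
  - name: my-cluster-direct
    context: {cluster: my-cluster-direct, user: my-cluster}
users:
  - name: my-cluster
    user:
      token: kubeconfig-user-abc12:placeholder-token
```

With a file like this exported, `kubectl --context my-cluster-direct get nodes` would continue to work even if the Rancher server is unavailable.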
- -For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) documentation. - -# Tools for Provisioning Kubernetes Clusters - -The tools that Rancher uses to provision downstream user clusters depend on the type of cluster that is being provisioned. - -### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider - -Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) - -### Rancher Launched Kubernetes for Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. - -Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) - -### Hosted Kubernetes Providers - -When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) - -### Imported Kubernetes Clusters - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -# Rancher Server Components and Source Code - -This diagram shows each component that the Rancher server is composed of: - -![Rancher Components]({{}}/img/rancher/rancher-architecture-rancher-components.svg) - -The GitHub repositories for Rancher can be found at the following links: - -- [Main Rancher server repository](https://github.com/rancher/rancher) -- [Rancher UI](https://github.com/rancher/ui) -- [Rancher API UI](https://github.com/rancher/api-ui) -- [Norman,](https://github.com/rancher/norman) Rancher's API framework -- [Types](https://github.com/rancher/types) -- [Rancher CLI](https://github.com/rancher/cli) -- [Catalog applications](https://github.com/rancher/helm) - -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{}}/rancher/v2.x/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. diff --git a/content/rancher/v2.x/en/overview/concepts/_index.md b/content/rancher/v2.x/en/overview/concepts/_index.md deleted file mode 100644 index f4fe9fc26..000000000 --- a/content/rancher/v2.x/en/overview/concepts/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Kubernetes Concepts -weight: 4 ---- - -This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified overview of Kubernetes components. 
For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) - -This section covers the following topics: - -- [About Docker](#about-docker) -- [About Kubernetes](#about-kubernetes) -- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) -- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) - - [etcd Nodes](#etcd-nodes) - - [Controlplane Nodes](#controlplane-nodes) - - [Worker Nodes](#worker-nodes) -- [About Helm](#about-helm) - -# About Docker - -Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. - ->**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. - -# About Kubernetes - -Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. - -# What is a Kubernetes Cluster? - -A cluster is a group of computers that work together as a single system. - -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. - -# Roles for Nodes in Kubernetes Clusters - -Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. - -### etcd Nodes - -Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. - -The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. - -The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. - -Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. - -Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Controlplane Nodes - -Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. 
You can run the control plane on a single node, although two or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -### Worker Nodes - -Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: - -- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. -- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). - -# About Helm - -For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. - -Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). - -For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/pipelines/_index.md b/content/rancher/v2.x/en/pipelines/_index.md deleted file mode 100644 index 107914aa2..000000000 --- a/content/rancher/v2.x/en/pipelines/_index.md +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: Pipelines -weight: 11 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/pipelines --- - -> As of Rancher v2.5, Git-based deployment pipelines are now recommended to be handled with Rancher Continuous Delivery powered by [Fleet,]({{}}/rancher/v2.x/en/deploy-across-clusters/fleet) available in Cluster Explorer. - -Rancher's pipeline provides a simple CI/CD experience. Use it to automatically check out code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. - -Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to set up a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - ->**Notes:** -> ->- Pipelines improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/docs-for-v2.0.x). ->- Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of, and is not a replacement for, enterprise-grade Jenkins or other CI tools your team uses. 
- -This section covers the following topics: - -- [Concepts](#concepts) -- [How Pipelines Work](#how-pipelines-work) -- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) -- [Setting up Pipelines](#setting-up-pipelines) - - [Configure version control providers](#1-configure-version-control-providers) - - [Configure repositories](#2-configure-repositories) - - [Configure the pipeline](#3-configure-the-pipeline) -- [Pipeline Configuration Reference](#pipeline-configuration-reference) -- [Running your Pipelines](#running-your-pipelines) -- [Triggering a Pipeline](#triggering-a-pipeline) - - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) - -# Concepts - -For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/concepts) - -# How Pipelines Work - -After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. - -A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. - -Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. - -When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: - - - **Jenkins:** - - The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. - - >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. - - - **Docker Registry:** - - Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. - - - **Minio:** - - Minio storage is used to store the logs for pipeline executions. - - >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/storage). - -# Roles-based Access Control for Pipelines - -If you can access a project, you can enable repositories to start building pipelines. 
- -Only [administrators]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. - -Project members can only configure repositories and pipelines. - -# Setting up Pipelines - -To set up pipelines, you will need to do the following: - -1. [Configure version control providers](#1-configure-version-control-providers) -2. [Configure repositories](#2-configure-repositories) -3. [Configure the pipeline](#3-configure-the-pipeline) - -### 1. Configure Version Control Providers - -Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider. - -| Provider | Available as of | -| --- | --- | -| GitHub | v2.0.0 | -| GitLab | v2.1.0 | -| Bitbucket | v2.2.0 | - -Select your provider's tab below and follow the directions. - -{{% tabs %}} -{{% tab "GitHub" %}} -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. - -1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - -1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "GitLab" %}} - -_Available as of v2.1.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. - -1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. - -1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. - -1. Click **Authenticate**. - ->**Note:** -> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. -> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. -{{% /tab %}} -{{% tab "Bitbucket Cloud" %}} - -_Available as of v2.2.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use public Bitbucket Cloud** option. - -1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. - -1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "Bitbucket Server" %}} - -_Available as of v2.2.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. 
Choose the **Use private Bitbucket Server setup** option. - -1. Follow the directions displayed to **Setup a Bitbucket Server application**. - -1. Enter the host address of your Bitbucket server installation. - -1. Click **Authenticate**. - ->**Note:** -> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: -> -> 1. Setup Rancher server with a certificate from a trusted CA. -> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). -> -{{% /tab %}} -{{% /tabs %}} - -**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. - -### 2. Configure Repositories - -After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Click on **Configure Repositories**. - -1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. - -1. For each repository that you want to set up a pipeline, click on **Enable**. - -1. When you're done enabling all your repositories, click on **Done**. - -**Results:** You have a list of repositories that you can start configuring pipelines for. - -### 3. Configure the Pipeline - -Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository that you want to set up a pipeline for. - -1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config) - - * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. 
- * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. - -1. Select which `branch` to use from the list of branches. - -1. _Available as of v2.2.0_ Optional: Set up notifications. - -1. Set up the trigger rules for the pipeline. - -1. Enter a **Timeout** for the pipeline. - -1. When all the stages and steps are configured, click **Done**. - -**Results:** Your pipeline is now configured and ready to be run. - - -# Pipeline Configuration Reference - -Refer to [this page]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: - -- Run a script -- Build and publish images -- Publish catalog templates -- Deploy YAML -- Deploy a catalog app - -The configuration reference also covers how to configure: - -- Notifications -- Timeouts -- The rules that trigger a pipeline -- Environment variables -- Secrets - - -# Running your Pipelines - -Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** (In versions before v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **⋮ > Run**. - -During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: - -- `docker-registry` -- `jenkins` -- `minio` - -This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. - -# Triggering a Pipeline - -When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. - -Available Events: - -* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. -* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. -* **Tag**: When a tag is created in the repository, the pipeline is triggered. - -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/). - -### Modifying the Event Triggers for the Repository - -1. From the **Global** view, navigate to the project in which you want to modify the pipeline's event triggers. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository whose event triggers you want to modify. Select the vertical **⋮ > Setting**. - -1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. - -1. Click **Save**. diff --git a/content/rancher/v2.x/en/pipelines/concepts/_index.md b/content/rancher/v2.x/en/pipelines/concepts/_index.md deleted file mode 100644 index 852d7e50a..000000000 --- a/content/rancher/v2.x/en/pipelines/concepts/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Concepts -weight: 1 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/pipelines/concepts --- - -The purpose of this page is to explain common concepts and terminology related to pipelines. 
- -- **Pipeline:** - - A _pipeline_ is a software delivery process that is broken into different stages and steps. Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. A pipeline is based on a specific repository. It defines the process to build, test, and deploy your code. Rancher uses the [pipeline as code](https://jenkins.io/doc/book/pipeline-as-code/) model. Pipeline configuration is represented as a pipeline file in the source code repository, using the file name `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -- **Stages:** - - A pipeline stage consists of multiple steps. Stages are executed in the order defined in the pipeline file. The steps in a stage are executed concurrently. A stage starts when all steps in the former stage finish without failure. - -- **Steps:** - - A pipeline step is executed inside a specified stage. A step fails if it exits with a code other than `0`. If a step exits with this failure code, the entire pipeline fails and terminates. - -- **Workspace:** - - The workspace is the working directory shared by all pipeline steps. In the beginning of a pipeline, source code is checked out to the workspace. The command for every step bootstraps in the workspace. During a pipeline execution, the artifacts from a previous step will be available in future steps. The working directory is an ephemeral volume and will be cleaned out with the executor pod when a pipeline execution is finished. - -Typically, pipeline stages include: - -- **Build:** - - Each time code is checked into your repository, the pipeline automatically clones the repo and builds a new iteration of your software. Throughout this process, the software is typically reviewed by automated tests. - -- **Publish:** - - After the build is completed, either a Docker image is built and published to a Docker registry or a catalog template is published. - -- **Deploy:** - - After the artifacts are published, you would release your application so users could start using the updated product. diff --git a/content/rancher/v2.x/en/pipelines/config/_index.md b/content/rancher/v2.x/en/pipelines/config/_index.md deleted file mode 100644 index 911200130..000000000 --- a/content/rancher/v2.x/en/pipelines/config/_index.md +++ /dev/null @@ -1,660 +0,0 @@ ---- -title: Pipeline Configuration Reference -weight: 1 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/pipelines/config ---- - -In this section, you'll learn how to configure pipelines. 
- -- [Step Types](#step-types) -- [Step Type: Run Script](#step-type-run-script) -- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) -- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) -- [Step Type: Deploy YAML](#step-type-deploy-yaml) -- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) -- [Notifications](#notifications) -- [Timeouts](#timeouts) -- [Triggers and Trigger Rules](#triggers-and-trigger-rules) -- [Environment Variables](#environment-variables) -- [Secrets](#secrets) -- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) -- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) - - [Executor Quota](#executor-quota) - - [Resource Quota for Executors](#resource-quota-for-executors) - - [Custom CA](#custom-ca) -- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) -- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) - -# Step Types - -Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. - -Step types include: - -- [Run Script](#step-type-run-script) -- [Build and Publish Images](#step-type-build-and-publish-images) -- [Publish Catalog Template](#step-type-publish-catalog-template) -- [Deploy YAML](#step-type-deploy-yaml) -- [Deploy Catalog App](#step-type-deploy-catalog-app) - - - -### Configuring Steps By UI - -If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. - -1. Add stages to your pipeline execution by clicking **Add Stage**. - - 1. Enter a **Name** for each stage of your pipeline. - 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. - -1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. - -### Configuring Steps by YAML - -For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com -``` -# Step Type: Run Script - -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configuring Script by UI - -1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. - -1. Click **Add**. 
- -### Configuring Script by YAML -```yaml -# example -stages: -- name: Build something - steps: - - runScriptConfig: - image: golang - shellScript: go build -``` -# Step Type: Build and Publish Images - -_Available as of Rancher v2.1.0_ - -The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. - -The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. - -### Configuring Building and Publishing Images by UI -1. From the **Step Type** drop-down, choose **Build and Publish**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | - Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | - Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | - Build Context

(**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). - -### Configuring Building and Publishing Images by YAML - -You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: - -Variable Name | Description -------------------------|------------------------------------------------------------ -PLUGIN_DRY_RUN | Disable docker push -PLUGIN_DEBUG | Docker daemon executes in debug mode -PLUGIN_MIRROR | Docker daemon registry mirror -PLUGIN_INSECURE | Docker daemon allows insecure registries -PLUGIN_BUILD_ARGS | Docker build args, a comma separated list - -
- -```yaml -# This example shows an environment variable being used -# in the Publish Image step. This variable allows you to -# publish an image to an insecure registry: - -stages: -- name: Publish Image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - pushRemote: true - registry: example.com - env: - PLUGIN_INSECURE: "true" -``` - -# Step Type: Publish Catalog Template - -_Available as of v2.2.0_ - -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [Git-hosted chart repository]({{}}/rancher/v2.x/en/catalog/custom/). It generates a Git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any of the variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) are supported in any file in the chart folder. - -### Configuring Publishing a Catalog Template by UI - -1. From the **Step Type** drop-down, choose **Publish Catalog Template**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | - Catalog Template Name | The name of the template. For example, wordpress. | - Catalog Template Version | The version of the template you want to publish. It should be consistent with the version defined in the `Chart.yaml` file. | - Protocol | You can choose to publish via the HTTP(S) or SSH protocol. | - Secret | The secret that stores your Git credentials. You need to create a secret in the dedicated pipeline namespace in the project before adding this step. If you use the HTTP(S) protocol, store the Git username and password in the `USERNAME` and `PASSWORD` keys of the secret. If you use the SSH protocol, store the Git deploy key in the `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | - Git URL | The Git URL of the chart repository that the template will be published to. | - Git Branch | The Git branch of the chart repository that the template will be published to. | - Author Name | The author name used in the commit message. | - Author Email | The author email used in the commit message. | - - -### Configuring Publishing a Catalog Template by YAML - -You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: - -* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. -* CatalogTemplate: The name of the template. -* Version: The version of the template you want to publish. It should be consistent with the version defined in the `Chart.yaml` file. -* GitUrl: The Git URL of the chart repository that the template will be published to. -* GitBranch: The Git branch of the chart repository that the template will be published to. -* GitAuthor: The author name used in the commit message. -* GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in the dedicated pipeline namespace. If you publish via the SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable.
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. - -```yaml -# example -stages: -- name: Publish Wordpress Template - steps: - - publishCatalogConfig: - path: ./charts/wordpress/latest - catalogTemplate: wordpress - version: ${CICD_GIT_TAG} - gitUrl: git@github.com:myrepo/charts.git - gitBranch: master - gitAuthor: example-user - gitEmail: user@example.com - envFrom: - - sourceName: publish-keys - sourceKey: DEPLOY_KEY -``` - -# Step Type: Deploy YAML - -This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configure Deploying YAML by UI - -1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. - -1. Enter the **YAML Path**, which is the path to the manifest file in the source code. - -1. Click **Add**. - -### Configure Deploying YAML by YAML - -```yaml -# example -stages: -- name: Deploy - steps: - - applyYamlConfig: - path: ./deployment.yaml -``` - -# Step Type :Deploy Catalog App - -_Available as of v2.2.0_ - -The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. - -### Configure Deploying Catalog App by UI - -1. From the **Step Type** drop-down, choose **Deploy Catalog App**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Catalog | The catalog from which the app template will be used. | - Template Name | The name of the app template. For example, wordpress. | - Template Version | The version of the app template you want to deploy. | - Namespace | The target namespace where you want to deploy the app. | - App Name | The name of the app you want to deploy. | - Answers | Key-value pairs of answers used to deploy the app. | - - -### Configure Deploying Catalog App by YAML - -You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: - -* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. -* Version: The version of the template you want to deploy. -* Answers: Key-value pairs of answers used to deploy the app. -* Name: The name of the app you want to deploy. -* TargetNamespace: The target namespace where you want to deploy the app. - -```yaml -# example -stages: -- name: Deploy App - steps: - - applyAppConfig: - catalogTemplate: cattle-global-data:library-mysql - version: 0.3.8 - answers: - persistence.enabled: "false" - name: testmysql - targetNamespace: test -``` - -# Timeouts - -By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. - -### Configuring Timeouts by UI - -Enter a new value in the **Timeout** field. 
- -### Configuring Timeouts by YAML - -In the `timeout` section, enter the timeout value in minutes. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -# timeout in minutes -timeout: 30 -``` - -# Notifications - -You can enable notifications to any [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/notifiers/#adding-notifiers) so it will be easy to add recipients immediately. - -### Configuring Notifications by UI - -_Available as of v2.2.0_ - -1. Within the **Notification** section, turn on notifications by clicking **Enable**. - -1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. - -1. If you don't have any existing [notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.x/en/monitoring-alerting/legacy/notifiers/#adding-notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. - - > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. - -1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. - -### Configuring Notifications by YAML -_Available as of v2.2.0_ - -In the `notification` section, you will provide the following information: - -* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. - * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. - * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. -* **Condition:** Select which conditions of when you want the notification to be sent. -* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. 
- -```yaml -# Example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` - -# Triggers and Trigger Rules - -After you configure a pipeline, you can trigger it using different methods: - -- **Manually:** - - After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. - -- **Automatically:** - - When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. - - To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. - -Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: - -- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. - -- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. - -If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. - -Wildcard character (`*`) expansion is supported in `branch` conditions. - -This section covers the following topics: - -- [Configuring pipeline triggers](#configuring-pipeline-triggers) -- [Configuring stage triggers](#configuring-stage-triggers) -- [Configuring step triggers](#configuring-step-triggers) -- [Configuring triggers by YAML](#configuring-triggers-by-yaml) - -### Configuring Pipeline Triggers - -1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Click on **Show Advanced Options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. - - 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - - 1. **Optional:** Add more branches that trigger a build. - -1. Click **Done.** - -### Configuring Stage Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. 
Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the stage. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the stage and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the stage. | - | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - -### Configuring Step Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the step. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the step and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the step. | - | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - - -### Configuring Triggers by YAML - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -``` - -# Environment Variables - -When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. - -### Configuring Environment Variables by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. - -1. Add your environment variable(s) into either the script or file. - -1. Click **Save**. - -### Configuring Environment Variables by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 -``` - -# Secrets - -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/). - -### Prerequisite -Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. -
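For reference, a minimal secret for this purpose might look like the sketch below. The name `my-secret`, the key `secret-key`, and the namespace are placeholders; use the namespace where your pipeline build pods actually run:

```yaml
# illustrative sketch: an Opaque secret whose key can be injected into a
# pipeline step through envFrom (see the YAML example further below)
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
  namespace: my-pipeline-namespace  # placeholder: the namespace where pipeline build pods run
type: Opaque
stringData:
  secret-key: my-secret-value       # placeholder value
```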
- ->**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). - -### Configuring Secrets by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. - -1. Click **Save**. - -### Configuring Secrets by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${ALIAS_ENV} - # environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV -``` - -# Pipeline Variable Substitution Reference - -For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. - -Variable Name | Description -------------------------|------------------------------------------------------------ -`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). -`CICD_GIT_URL` | URL of the Git repository. -`CICD_GIT_COMMIT` | Git commit ID being executed. -`CICD_GIT_BRANCH` | Git branch of this event. -`CICD_GIT_REF` | Git reference specification of this event. -`CICD_GIT_TAG` | Git tag name, set on tag event. -`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). -`CICD_PIPELINE_ID` | Rancher ID for the pipeline. -`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. -`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. -`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. -`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
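For instance, a **Run Script** step could surface some of this metadata in the build log. This is only an illustrative sketch; the image and command are arbitrary:

```yaml
# illustrative sketch: echo execution metadata substituted by Rancher
stages:
  - name: Show build metadata
    steps:
      - runScriptConfig:
          image: busybox
          shellScript: echo "branch ${CICD_GIT_BRANCH} at commit ${CICD_GIT_COMMIT}, build ${CICD_EXECUTION_SEQUENCE}"
```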

[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) - -# Global Pipeline Execution Settings - -After configuring a version control provider, you can configure several global options that control how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -- [Executor Quota](#executor-quota) -- [Resource Quota for Executors](#resource-quota-for-executors) -- [Custom CA](#custom-ca) - -### Executor Quota - -Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. - -### Resource Quota for Executors - -_Available as of v2.2.0_ - -Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, a build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every container in the pod. - -Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation**, or **CPU Limit**, then click **Update Limit and Reservation**. - -To configure compute resources for pipeline-step containers, set them in the `.rancher-pipeline.yml` file. In a step, you will provide the following information: - -* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. -* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. -* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. -* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls - cpuRequest: 100m - cpuLimit: 1 - memoryRequest: 100Mi - memoryLimit: 1Gi - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - cpuRequest: 100m - cpuLimit: 1 - memoryRequest: 100Mi - memoryLimit: 1Gi -``` - ->**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. - -### Custom CA - -_Available as of v2.2.0_ - -If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. - -1. Click **Edit cacerts**. - -1. Paste in the CA root certificates and click **Save cacerts**. - -**Result:** Pipelines can be used and new pods will be able to work with the self-signed certificate. - -# Persistent Data for Pipeline Components - -The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine.
If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/storage) - -# Example rancher-pipeline.yml - -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example) diff --git a/content/rancher/v2.x/en/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.x/en/pipelines/docs-for-v2.0.x/_index.md deleted file mode 100644 index 3dc486eef..000000000 --- a/content/rancher/v2.x/en/pipelines/docs-for-v2.0.x/_index.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: v2.0.x Pipeline Documentation -weight: 9000 -aliases: - - /rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x - - /rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x - - /rancher/v2.x/en/k8s-in-rancher/pipelines/docs-for-v2.0.x ---- - ->**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). - - - -Pipelines help you automate the software delivery process. You can integrate Rancher with GitHub to create a pipeline. - -You can set up your pipeline to run a series of stages and steps to test your code and deploy it. - -
-- **Pipelines:** Contain a series of stages and steps. Out-of-the-box, the pipelines feature supports fan-out and fan-in capabilities.
-- **Stages:** Are executed sequentially. The next stage will not execute until all of the steps within the current stage have completed.
-- **Steps:** Are executed in parallel within a stage.
- -## Enabling CI Pipelines - -1. Select your cluster from the drop-down. - -2. Under the **Tools** menu, select **Pipelines**. - -3. Follow the on-page instructions for setting up GitHub authentication. - - -## Creating CI Pipelines - -1. Go to the project you want this pipeline to run in. - -2. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -3. Click the **Add Pipeline** button. - -4. Enter your repository name (autocomplete should help you zero in on it quickly). - -5. Select the branch options. - - - Only the branch {BRANCH NAME}: Only events triggered by changes to this branch will be built. - - - Everything but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. - - - All branches: Always build, regardless of the branch that triggered the event. - - >**Note:** If you want one path for master, but another for PRs or development/test/feature branches, create two separate pipelines. - -6. Select the build trigger events. By default, builds only happen when you manually click build now in the Rancher UI. - - - Automatically build this pipeline whenever there is a git commit. (This respects the branch selection above.) - - - Automatically build this pipeline whenever there is a new PR. - - - Automatically build the pipeline. (Allows you to configure scheduled builds, similar to cron.) - -7. Click the **Add** button. - - By default, Rancher provides a three-stage pipeline for you. It consists of a build stage where you would compile, unit test, and scan code. The publish stage has a single step to publish a Docker image. - -8. Add a name to the pipeline in order to complete adding a pipeline. - -9. Click on the ‘run a script’ box under the ‘Build’ stage. - - Here you can set the image, or select from pre-packaged environments. - -10. Configure a shell script to run inside the container when building. - -11. Click **Save** to persist the changes. - -12. Click the ‘publish an image’ box under the ‘Publish’ stage. - -13. Set the location of the Dockerfile. By default it looks in the root of the workspace. Alternatively, set the build context for building the image relative to the root of the workspace. - -14. Set the image information. - - The registry is the remote registry URL. It defaults to Docker Hub. - Repository is the `/` in the repository. - -15. Select the Tag. You can hard-code a tag like ‘latest’ or select from a list of available variables. - -16. If this is the first time using this registry, you can add the username/password for pushing the image. You must click **Save** for the registry credentials and also **Save** for the modal. - - - - -## Creating a New Stage - -1. To add a new stage, click the ‘add a new stage’ link in either create or edit mode of the pipeline view. - -2. Provide a name for the stage. - -3. Click **Save**. - - -## Creating a New Step - -1. Go to the create/edit mode of the pipeline. - -2. Click the “Add Step” button in the stage that you would like to add a step to. - -3. 
Fill out the form as detailed above - - -## Environment Variables - -For your convenience the following environment variables are available in your build steps: - -Variable Name | Description -------------------------|------------------------------------------------------------ -CICD_GIT_REPO_NAME | Repository Name (Stripped of Github Organization) -CICD_PIPELINE_NAME | Name of the pipeline -CICD_GIT_BRANCH | Git branch of this event -CICD_TRIGGER_TYPE | Event that triggered the build -CICD_PIPELINE_ID | Rancher ID for the pipeline -CICD_GIT_URL | URL of the Git repository -CICD_EXECUTION_SEQUENCE | Build number of the pipeline -CICD_EXECUTION_ID | Combination of {CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE} -CICD_GIT_COMMIT | Git commit ID being executed. diff --git a/content/rancher/v2.x/en/pipelines/example-repos/_index.md b/content/rancher/v2.x/en/pipelines/example-repos/_index.md deleted file mode 100644 index a3d434542..000000000 --- a/content/rancher/v2.x/en/pipelines/example-repos/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Example Repositories -weight: 500 -aliases: - - /rancher/v2.x/en/tools/pipelines/quick-start-guide/ - - /rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos ---- - -Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: - -- Go -- Maven -- php - -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines). - -To start using these example repositories, - -1. [Enable the example repositories](#1-enable-the-example-repositories) -2. [View the example pipeline](#2-view-the-example-pipeline) -3. [Run the example pipeline](#3-run-the-example-pipeline) - -### 1. Enable the Example Repositories - -By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Click **Configure Repositories**. - - **Step Result:** A list of example repositories displays. - - >**Note:** Example repositories only display if you haven't fetched your own repos. - -1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. - -**Results:** - -- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. - -- The following workloads are deployed to a new namespace: - - - `docker-registry` - - `jenkins` - - `minio` - -### 2. View the Example Pipeline - -After enabling an example repository, review the pipeline to see how it is set up. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮**. There are two ways to view the pipeline: - * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. 
- * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. - -### 3. Run the Example Pipeline - -After enabling an example repository, run the pipeline to see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮ > Run**. - - >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. - -**Result:** The pipeline runs. You can see the results in the logs. - -### What's Next? - -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.x/en/project-admin/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration-reference). diff --git a/content/rancher/v2.x/en/pipelines/example/_index.md b/content/rancher/v2.x/en/pipelines/example/_index.md deleted file mode 100644 index 3ddc4139b..000000000 --- a/content/rancher/v2.x/en/pipelines/example/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Example YAML File -weight: 501 -aliases: - - /rancher/v2.x/en/tools/pipelines/reference/ - - /rancher/v2.x/en/k8s-in-rancher/pipelines/example ---- - -Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -In the [pipeline configuration reference]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. - -Below is a full example `rancher-pipeline.yml` for those who want to jump right in. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} - # Set environment variables in container for the step - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 - # Set environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . 
- tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com - - name: Deploy some workloads - steps: - - applyYamlConfig: - path: ./deployment.yaml -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -# timeout in minutes -timeout: 30 -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` diff --git a/content/rancher/v2.x/en/pipelines/storage/_index.md b/content/rancher/v2.x/en/pipelines/storage/_index.md deleted file mode 100644 index 7a9542ac7..000000000 --- a/content/rancher/v2.x/en/pipelines/storage/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Configuring Persistent Data for Pipeline Components -weight: 600 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/pipelines/storage ---- - -The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelines-work) workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/) - ->**Prerequisites (for both parts A and B):** -> ->[Persistent volumes]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) must be available for the cluster. - -### A. Configuring Persistent Data for Docker Registry - -1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. - -1. Find the `docker-registry` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. - -1. Click **Upgrade**. - -### B. Configuring Persistent Data for Minio - -1. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} - -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. - -1. Click **Upgrade**. - -**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.x/en/project-admin/_index.md b/content/rancher/v2.x/en/project-admin/_index.md deleted file mode 100644 index ade4e5ac6..000000000 --- a/content/rancher/v2.x/en/project-admin/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Project Administration -weight: 9 -aliases: - - /rancher/v2.x/en/project-admin/editing-projects/ ---- - -_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! - -Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. - -You can use projects to perform actions like: - -- [Assign users access to a group of namespaces]({{}}/rancher/v2.x/en/project-admin/project-members) -- Assign users [specific roles in a project]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{}}/rancher/v2.x/en/project-admin/resource-quotas/) -- [Manage namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/) -- [Configure tools]({{}}/rancher/v2.x/en/project-admin/tools/) -- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.x/en/project-admin/pipelines) -- [Configure pod security policies]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) - -### Authorization - -Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. - -Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
- -## Switching between Projects - -To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. - -1. From the **Global** view, navigate to the project that you want to configure. - -1. Select **Projects/Namespaces** from the navigation bar. - -1. Select the link for the project that you want to open. diff --git a/content/rancher/v2.x/en/project-admin/namespaces/_index.md b/content/rancher/v2.x/en/project-admin/namespaces/_index.md deleted file mode 100644 index 9f52eb903..000000000 --- a/content/rancher/v2.x/en/project-admin/namespaces/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Namespaces -weight: 2520 ---- - -Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. - -Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. - -Resources that you can assign directly to namespaces include: - -- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.x/en/project-admin/namespaces) to ensure that you will have permission to access the namespace. - - -### Creating Namespaces - -Create a new namespace to isolate apps and resources in a project. - ->**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. - -1. From the **Global** view, open the project where you want to create a namespace. - - >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well. - -1. From the main menu, select **Namespace**. The click **Add Namespace**. - -1. 
**Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). - -1. Enter a **Name** and then click **Create**. - -**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. - -### Moving Namespaces to Another Project - -Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. - -1. From the **Global** view, open the cluster that contains the namespace you want to move. - -1. From the main menu, select **Projects/Namespaces**. - -1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at one. - - >**Notes:** - > - >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. - >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. - -1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. - -**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. - -### Editing Namespace Resource Quotas - -You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -For more information, see how to [edit namespace resource quotas]({{}}/rancher/v2.x/en/project-admin//resource-quotas/override-namespace-default/#editing-namespace-resource-quotas). \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/pipelines/_index.md b/content/rancher/v2.x/en/project-admin/pipelines/_index.md deleted file mode 100644 index 7eea9d667..000000000 --- a/content/rancher/v2.x/en/project-admin/pipelines/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher's CI/CD Pipelines -description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users -weight: 4000 -aliases: - - /rancher/v2.x/en/concepts/ci-cd-pipelines/ - - /rancher/v2.x/en/tasks/pipelines/ - - /rancher/v2.x/en/tools/pipelines/configurations/ ---- -Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - -For details, refer to the [pipelines]({{}}/rancher/v2.x/en/k8s-in-rancher/pipelines) section. 
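As a rough sketch of what such a pipeline can look like in `.rancher-pipeline.yml` form (the image names, tag, registry, and paths below are placeholders, not a recommended setup):

```yaml
# illustrative sketch: test, publish, and deploy in three stages
stages:
  - name: Run unit tests
    steps:
      - runScriptConfig:
          image: golang
          shellScript: go test ./...
  - name: Publish image
    steps:
      - publishImageConfig:
          dockerfilePath: ./Dockerfile
          buildContext: .
          tag: repo/app:v1
          pushRemote: true
          registry: reg.example.com
  - name: Deploy
    steps:
      - applyYamlConfig:
          path: ./deployment.yaml
```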
\ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md deleted file mode 100644 index a5367c4cf..000000000 --- a/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Pod Security Policies -weight: 5600 ---- - -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. - -### Prerequisites - -- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). -- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster]({{}}/rancher/v2.x/en/cluster-admin/pod-security-policy). - -### Applying a Pod Security Policy - -1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. -1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. - Assigning a PSP to a project will: - - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. - -1. Click **Save**. - -**Result:** The PSP is applied to the project and any namespaces added to the project. - ->**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/project-members/_index.md b/content/rancher/v2.x/en/project-admin/project-members/_index.md deleted file mode 100644 index c1848a0de..000000000 --- a/content/rancher/v2.x/en/project-admin/project-members/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Adding Users to Projects -weight: 2505 -aliases: - - /rancher/v2.x/en/tasks/projects/add-project-members/ - - /rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/ ---- - -If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. - -You can add members to a project as it is created, or add them to an existing project. - ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.x/en/cluster-provisioning/cluster-members/) instead. - -### Adding Members to a New Project - -You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) - -### Adding Members to an Existing Project - -Following project creation, you can add users as project members so that they can access its resources. - -1. 
From the **Global** view, open the project that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the project. - - If external authentication is configured: - - - Rancher returns users from your external authentication source as you type. - - - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. - -1. Assign the user or group **Project** roles. - - [What are Project Roles?]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- For `Custom` roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). - -**Result:** The chosen users are added to the project. - -- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md deleted file mode 100644 index 769c88728..000000000 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Project Resource Quotas -weight: 2515 ---- - -_Available as of v2.1.0_ - -In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. - -This page is a how-to guide for creating resource quotas in existing projects. - -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. 
For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) - -### Applying Resource Quotas to Existing Projects - -_Available as of v2.0.1_ - -Edit [resource quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - -- You want to limit the resources that a project and its namespaces can use. -- You want to scale the resources available to a project up or down when a research quota is already in effect. - -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. - -1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. - -1. Select a Resource Type. For more information on types, see the [quota type reference.](./quota-type-reference) - -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. - - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | - -1. **Optional:** Add more quotas. - -1. Click **Create**. - -**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, Rancher won't let you save your changes. diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md deleted file mode 100644 index 2827e3a2f..000000000 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Setting Container Default Resource Limits -weight: 3 ---- - -_Available as of v2.2.0_ - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. - -To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -### Editing the Container Default Resource Limit - -_Available as of v2.2.0_ - -Edit [container default resource limit]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) when: - -- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. -- You want to edit the default container resource limit. - -1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to edit the container default resource limit. 
From that project, select **⋮ > Edit**. -1. Expand **Container Default Resource Limit** and edit the values. - -### Resource Limit Propagation - -When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. - -> **Note:** Before v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you can set a default container resource limit on a project and launch any catalog applications. - -Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. - -### Container Resource Quota Types - -The following resource limits can be configured: - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| -| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | -| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | -| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md deleted file mode 100644 index dc3b768d1..000000000 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Overriding the Default Limit for a Namespace -weight: 2 ---- - -Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. - -In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. 
- -Namespace Default Limit Override -![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) - -How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) - -### Editing Namespace Resource Quotas - -If there is a [resource quota]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. - -1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - - For more information about each **Resource Type**, see [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). - - >**Note:** - > - >- If a resource quota is not configured for the project, these options will not be available. - >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. - -**Result:** Your override is applied to the namespace's resource quota. diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md deleted file mode 100644 index 18005bc81..000000000 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Resource Quota Type Reference -weight: 4 ---- - -When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit* | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the project/namespace.1 | -| CPU Reservation* | The minimum amount of CPU (in millicores) guaranteed to the project/namespace.1 | -| Memory Limit* | The maximum amount of memory (in bytes) allocated to the project/namespace.1 | -| Memory Reservation* | The minimum amount of memory (in bytes) guaranteed to the project/namespace.1 | -| Storage Reservation | The minimum amount of storage (in gigabytes) guaranteed to the project/namespace. | -| Services Load Balancers | The maximum number of load balancers services that can exist in the project/namespace. | -| Services Node Ports | The maximum number of node port services that can exist in the project/namespace. | -| Pods | The maximum number of pods that can exist in the project/namespace in a non-terminal state (i.e., pods with a state of `.status.phase in (Failed, Succeeded)` equal to true). | -| Services | The maximum number of services that can exist in the project/namespace. | -| ConfigMaps | The maximum number of ConfigMaps that can exist in the project/namespace. 
| -| Persistent Volume Claims | The maximum number of persistent volume claims that can exist in the project/namespace. | -| Replications Controllers | The maximum number of replication controllers that can exist in the project/namespace. | -| Secrets | The maximum number of secrets that can exist in the project/namespace. | - ->***** When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. As of v2.2.0, a container default resource limit can be set at the same time to avoid the need to explicitly set these limits for every workload. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md deleted file mode 100644 index 63a18ba0f..000000000 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: How Resource Quotas Work in Rancher Projects -weight: 1 ---- - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. - -In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. - -In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. - -Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) - -Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. - -The resource quota includes two limits, which you set while creating or editing a project: -
- -- **Project Limits:** - - This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. - -- **Namespace Default Limits:** - - This value is the default resource limit available for each namespace. When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you override it. - -In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. - -Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.png) - -Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. - -The following table explains the key differences between the two quota types. - -| Rancher Resource Quotas | Kubernetes Resource Quotas | -| ---------------------------------------------------------- | -------------------------------------------------------- | -| Applies to projects and namespace. | Applies to namespaces only. | -| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | -| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. diff --git a/content/rancher/v2.x/en/project-admin/tools/_index.md b/content/rancher/v2.x/en/project-admin/tools/_index.md deleted file mode 100644 index 0921bdc4c..000000000 --- a/content/rancher/v2.x/en/project-admin/tools/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Tools for Logging, Monitoring, and Visibility -weight: 2525 ---- - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - - -- [Notifiers and Alerts](#notifiers-and-alerts) -- [Logging](#logging) -- [Monitoring](#monitoring) - - - -## Notifiers and Alerts - -Notifiers and alerts are two features that work together to inform you of events in the Rancher system. 
- -[Notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. - -[Alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts) are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level. - -## Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd. - -For details, refer to the [logging section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) - -## Monitoring - -_Available as of v2.2.0_ - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring) diff --git a/content/rancher/v2.x/en/quick-start-guide/_index.md b/content/rancher/v2.x/en/quick-start-guide/_index.md deleted file mode 100644 index 28783b317..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Rancher Deployment Quick Start Guides -metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. -short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. -weight: 2 ---- ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.x/en/installation/). - -Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. - -We have Quick Start Guides for: - -- [Deploying Rancher Server]({{}}/rancher/v2.x/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. - -- [Deploying Workloads]({{}}/rancher/v2.x/en/quick-start-guide/workload/): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. - -- [Using the CLI]({{}}/rancher/v2.x/en/quick-start-guide/cli/): Use `kubectl` or the Rancher command line interface (CLI) to interact with your Rancher instance.
diff --git a/content/rancher/v2.x/en/quick-start-guide/cli/_index.md b/content/rancher/v2.x/en/quick-start-guide/cli/_index.md deleted file mode 100644 index 41e15bb83..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/cli/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: CLI with Rancher -weight: 100 ---- - -Interact with Rancher using command line interface (CLI) tools from your workstation. - -## Rancher CLI - -Follow the steps in [rancher cli](../cli). - -Ensure you can run `rancher kubectl get pods` successfully. - - -## kubectl -Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - - -Configure kubectl by visiting your cluster in the Rancher Web UI then clicking on `Kubeconfig`, copying contents and putting into your `~/.kube/config` file. - -Run `kubectl cluster-info` or `kubectl get pods` successfully. - -## Authentication with kubectl and kubeconfig Tokens with TTL - -_**Available as of v2.4.6**_ - -_Requirements_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.x/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher cli](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see error like: -`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. - -This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: - -1. Local -2. Active Directory -3. FreeIpa, OpenLdap -4. SAML providers - Ping, Okta, ADFS, Keycloak, Shibboleth - -When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. -The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid till [it expires](../../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../api/api-tokens/#deleting-tokens) -Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. - -_Note_ - -As of CLI [v2.4.10](https://github.com/rancher/cli/releases/tag/v2.4.10), the kubeconfig token can be cached at a chosen path with `cache-dir` flag or env var `RANCHER_CACHE_DIR`. - -_**Current Known Issues**_ - -1. If [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) is enabled for RKE clusters to [authenticate directly with downstream cluster]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) and Rancher server goes down, all kubectl calls will fail after the kubeconfig token expires. No new kubeconfig tokens can be generated if Rancher server isn't accessible. -2. If a kubeconfig token is deleted from Rancher [API tokens]({{}}/rancher/v2.x/en/api/api-tokens/#deleting-tokens) page, and the token is still cached, cli won't ask you to login again until the token expires or is deleted. -`kubectl` calls will result into an error like `error: You must be logged in to the server (the server has asked for the client to provide credentials`. Tokens can be deleted using `rancher token delete`. 
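Putting the pieces above together, a first session from a workstation might look like the sketch below. It assumes the Rancher CLI binary is on your `PATH`, that the cluster's Kubeconfig from the Rancher UI has been saved to `~/.kube/config`, and that TTL is enforced on kubeconfig tokens; the cache directory name is only an example:

```sh
# Confirm the kubeconfig copied from the Rancher UI works.
kubectl cluster-info

# Optional (CLI v2.4.10+): choose where short-lived kubeconfig tokens are cached.
# By default they are cached under ./.cache/token in the directory where kubectl runs.
export RANCHER_CACHE_DIR="$HOME/.rancher-token-cache"

# The first call prompts you to pick an auth provider and log in to the Rancher
# server; subsequent calls reuse the cached token until it expires or is deleted.
kubectl get pods

# Remove a stale or unwanted kubeconfig token from the Rancher server.
rancher token delete
```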
diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/_index.md deleted file mode 100644 index 34400158a..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Deploying Rancher Server -weight: 100 ---- - -Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - -# Automated Quickstart to Deploy Rancher on Amazon EKS - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS Kubernetes cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) -# Installing Rancher on a Virtual Machine - -The following guides use automation tools to deploy the Rancher server on a virtual machine. - -> These guides do not deploy Rancher on a separate Kubernetes cluster, which is a best practice in cases where the Rancher server needs to manage downstream Kubernetes clusters. - -- [DigitalOcean](./digital-ocean-qs) (uses Terraform) -- [AWS](./amazon-aws-qs) (uses Terraform) -- [Azure](./microsoft-azure-qs) (uses Terraform) -- [GCP](./google-gcp-qs) (uses Terraform) -- [Vagrant](./quickstart-vagrant) - -If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. - -- [Manual Install](./quickstart-manual-setup) diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md deleted file mode 100644 index 954cf058d..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Rancher AWS Quick Start Guide -description: Read this step by step Rancher AWS guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher Server on AWS with a single node cluster attached. - -## Prerequisites - ->**Note** ->Deploying to Amazon AWS will incur charges. - -- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. -- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. -- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the AWS folder containing the terraform files by executing `cd quickstart/aws`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `aws_access_key` - Amazon AWS Access Key - - `aws_secret_key` - Amazon AWS Secret Key - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. 
-See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. -Suggestions include: - - `aws_region` - Amazon AWS region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/aws` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md deleted file mode 100644 index 30b1f46c9..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Rancher DigitalOcean Quick Start Guide -description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher Server on DigitalOcean with a single node cluster attached. - -## Prerequisites - ->**Note** ->Deploying to DigitalOcean will incur charges. - -- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. -- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/do`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `do_token` - DigitalOcean access key - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. 
-See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. -Suggestions include: - - `do_region` - DigitalOcean region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget - - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 15 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/do` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/google-gcp-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/google-gcp-qs/_index.md deleted file mode 100644 index 57e610664..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/google-gcp-qs/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Rancher GCP Quick Start Guide -description: Read this step by step Rancher GCP guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher server on GCP in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - -## Prerequisites - ->**Note** ->Deploying to Google GCP will incur charges. - -- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. -- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. -- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the GCP folder containing the terraform files by executing `cd quickstart/gcp`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. 
Edit `terraform.tfvars` and customize the following variables: - - `gcp_account_json` - GCP service account file path and file name - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. -Suggestions include: - - `gcp_region` - Google GCP region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget - - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/gcp` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md deleted file mode 100644 index be100d171..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Rancher Azure Quick Start Guide -description: Read this step by step Rancher Azure guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- - -The following steps will quickly deploy a Rancher server on Azure in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - -## Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. -- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. 
-- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the Azure folder containing the terraform files by executing `cd quickstart/azure`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `azure_subscription_id` - Microsoft Azure Subscription ID - - `azure_client_id` - Microsoft Azure Client ID - - `azure_client_secret` - Microsoft Azure Client Secret - - `azure_tenant_id` - Microsoft Azure Tenant ID - - `rancher_server_admin_password` - Admin password for created Rancher server - -2. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. -Suggestions include: - - `azure_location` - Microsoft Azure region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -2. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/azure`. - -#### Result - -Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/azure` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md deleted file mode 100644 index 39819061e..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Manual Quick Start -weight: 300 ---- -Howdy Partner! This tutorial walks you through: - -- Installation of Rancher 2.x -- Creation of your first cluster -- Deployment of an application, Nginx - -## Quick Start Outline - -This Quick Start Guide is divided into different tasks for easier consumption. - - - - -1. [Provision a Linux Host](#1-provision-a-linux-host) - -1. 
[Install Rancher](#2-install-rancher) - -1. [Log In](#3-log-in) - -1. [Create the Cluster](#4-create-the-cluster) - - -
-### 1. Provision a Linux Host - - Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-prem VM -- A bare-metal server - - >**Note:** - > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. - > - > For a full list of port requirements, refer to [Docker Installation]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/). - - Provision the host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements/). - -### 2. Install Rancher - -To install Rancher on your host, connect to it and then use a shell to install. - -1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. - -2. From your shell, enter the following command: - - ``` - sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher - ``` - -**Result:** Rancher is installed. - -### 3. Log In - -Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations. - -1. Open a web browser and enter the IP address of your host: `https://`. - - Replace `` with your host IP address. - -2. When prompted, create a password for the default `admin` account there cowpoke! - -3. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.

If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster. - -
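Before creating the cluster, you can optionally sanity-check the installation from the Linux host. This is only a rough sketch: the container ID and server address are placeholders, and `-k` is used because Rancher generates a self-signed certificate by default:

```sh
# The rancher/rancher container should be listed as Up.
docker ps --filter ancestor=rancher/rancher

# Watch the server start up (replace <container-id> with the ID shown above).
docker logs --tail 20 <container-id>

# Once the server is ready, the ping endpoint answers with "pong".
curl -k https://<server-ip>/ping
```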
- -### 4. Create the Cluster - -Welcome to Rancher! You are now able to create your first Kubernetes cluster. - -In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Existing Nodes**. - -3. Enter a **Cluster Name**. - -4. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. - -5. Click **Next**. - -6. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. - -7. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. - -8. Skip the **Labels** stuff. It's not important for now. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - -11. When you finish running the command on your Linux host, click **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -#### Finished - -Congratulations! You have created your first cluster. - -#### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md deleted file mode 100644 index ea28bf2c3..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Vagrant Quick Start -weight: 200 ---- -The following steps quickly deploy a Rancher Server with a single node cluster attached. - -## Prerequisites - -- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. -- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. -- At least 4GB of free RAM. - -### Note -- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: - - `vagrant plugin install vagrant-vboxmanage` - - `vagrant plugin install vagrant-vbguest` - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the folder containing the Vagrantfile by executing `cd quickstart/vagrant`. - -3. **Optional:** Edit `config.yaml` to: - - - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) - - Change the password of the `admin` user for logging into Rancher. (`default_password`) - -4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. - -5. Once provisioning finishes, go to `https://172.22.101.101` in the browser. The default user/password is `admin/admin`. 
- -**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.x/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/vagrant` folder execute `vagrant destroy -f`. - -2. Wait for the confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/_index.md deleted file mode 100644 index a3be7493b..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/workload/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Deploying Workloads -weight: 200 ---- - -These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - -- [Workload with Ingress](./quickstart-deploy-workload-ingress) -- [Workload with NodePort](./quickstart-deploy-workload-nodeport) diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md deleted file mode 100644 index 0a8f87ce9..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Workload with Ingress Quick Start -weight: 100 ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. Leave the remaining options on their default setting. We'll tell you about them later. - -9. Click **Launch**. - -**Result:** - -* Your workload is deployed. This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
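If you also have command-line access to the cluster, you can confirm the deployment outside the UI before exposing it. A minimal sketch, assuming the workload was named `hello-world` and was deployed into the `default` namespace:

```sh
# The deployment created for the workload should report READY 1/1.
kubectl -n default get deployment hello-world

# The workload's pod should reach the Running state.
kubectl -n default get pods
```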
-### 2. Expose The Application Via An Ingress - -Now that the application is up and running it needs to be exposed so that other services can connect. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects**. - -3. Open the **Default** project. - -4. Click **Resources > Workloads > Load Balancing.** In versions before v2.3.0, click the **Workloads** tab. Click on the **Load Balancing** tab. - -5. Click **Add Ingress**. - -6. Enter a name i.e. **hello**. - -7. In the **Target** field, drop down the list and choose the name that you set for your service. - -8. Enter `80` in the **Port** field. - -9. Leave everything else as default and click **Save**. - -**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. - -### View Your Application - -From the **Load Balancing** page, click the target link, which will look something like `hello.default.xxx.xxx.xxx.xxx.sslip.io > hello-world`. - -Your application will open in a separate window. - -#### Finished - -Congratulations! You have successfully deployed a workload exposed via an ingress. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md deleted file mode 100644 index 2920ee579..000000000 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Workload with NodePort Quick Start -weight: 200 ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. From **Port Mapping**, click **Add Port**. - -9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. - - ![As a dropdown, NodePort (On every node selected)]({{}}/img/rancher/nodeport-dropdown.png) - -10. From the **On Listening Port** field, leave the **Random** value in place. - - ![On Listening Port, Random selected]({{}}/img/rancher/listening-port-field.png) - -11. 
From the **Publish the container port** field, enter port `80`. - - ![Publish the container port, 80 entered]({{}}/img/rancher/container-port-field.png) - -12. Leave the remaining options on their default setting. We'll tell you about them later. - -13. Click **Launch**. - -**Result:** - -* Your workload is deployed. This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
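You can also verify the port mapping from the command line if you have `kubectl` access. A minimal sketch, assuming the `default` namespace; the exact service name Rancher generates may differ:

```sh
# Look for the NodePort service created for the port mapping; it exposes container
# port 80 on a random node port in the default 30000-32767 range.
kubectl -n default get services

# Test it from one of the cluster nodes (replace <node-port> with the port shown above).
curl http://localhost:<node-port>
```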
- -### 2. Viewing Your Application - -From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens. - -### Attention: Cloud-Hosted Sandboxes - -When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test the workload in an SSH session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. - -```sh -gettingstarted@rancher:~$ curl http://localhost:31568
Hello world!
My hostname is hello-world-66b4b9d88b-78bhx

k8s services found 2

  INGRESS_D1E1A394F61C108633C4BD37AEDDE757  tcp://10.43.203.31:80
  KUBERNETES                                tcp://10.43.0.1:443
- - - -gettingstarted@rancher:~$ - -``` - -### Finished - -Congratulations! You have successfully deployed a workload exposed via a NodePort. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.x/en/security/_index.md b/content/rancher/v2.x/en/security/_index.md deleted file mode 100644 index e0105e982..000000000 --- a/content/rancher/v2.x/en/security/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Security -weight: 20 ---- - - - - - - - -
| Security policy | Reporting process | Announcements |
| --------------- | ----------------- | ------------- |
| Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame. | Please submit possible security issues by emailing security@rancher.com | Subscribe to the Rancher announcements forum for release updates. |
- -Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,]({{}}/rancher/v2.x/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. - -On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: - -- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) -- [Guide to hardening Rancher installations](#rancher-hardening-guide) -- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) -- [Third-party penetration test reports](#third-party-penetration-test-reports) -- [Rancher CVEs and resolutions](#rancher-cves-and-resolutions) - -### Running a CIS Security Scan on a Kubernetes Cluster - -_Available as of v2.4.0_ - -Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark. - -The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. - -The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace." - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related to only Scored recommendations. - -When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. - -For details, refer to the section on [security scans.]({{}}/rancher/v2.x/en/cis-scans) - -### Rancher Hardening Guide - -The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. - -The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x and v.2.3.x. See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-sssessment) for the full list of security controls. - -> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. 
- -Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -[Hardening Guide v2.5]({{}}/rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/) | Rancher v2.5 | Benchmark v1.6 | Kubernetes v1.18 -[Hardening Guide v2.4]({{}}/rancher/v2.x/en/security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.5]({{}}/rancher/v2.x/en/security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.3]({{}}/rancher/v2.x/en/security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 -[Hardening Guide v2.3]({{}}/rancher/v2.x/en/security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 -[Hardening Guide v2.2]({{}}/rancher/v2.x/en/security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 -[Hardening Guide v2.1]({{}}/rancher/v2.x/en/security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 - -### The CIS Benchmark and Self-Assessment - -The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). 
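Because Rancher and RKE run the Kubernetes services as Docker containers, many audit checks begin by inspecting those containers directly on a node rather than systemd units. A minimal sketch, run over SSH on a control plane or etcd node; the exact container set depends on the node's roles:

```sh
# RKE launches the Kubernetes services as Docker containers on each node.
docker ps --format '{{.Names}}' | grep -E 'kube-apiserver|kube-controller-manager|kube-scheduler|kubelet|kube-proxy|etcd'

# Example: list the flags a component was started with, as many CIS checks require.
docker inspect --format '{{.Args}}' kube-apiserver
```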
- -Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -[Self Assessment Guide v2.5]({{}}/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/) | Rancher v2.5 | Hardening Guide v2.5 | Kubernetes v1.18 | Benchmark v1.6 -[Self Assessment Guide v2.4]({{}}/rancher/v2.x/en/security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.5]({{}}/rancher/v2.x/en/security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.3]({{}}/rancher/v2.x/en/security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 -[Self Assessment Guide v2.3]({{}}/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 -[Self Assessment Guide v2.2]({{}}/rancher/v2.x/en/security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 -[Self Assessment Guide v2.1]({{}}/rancher/v2.x/en/security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 - -### Third-party Penetration Test Reports - -Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. - -Results: - -- [Cure53 Pen Test - 7/2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) -- [Untamed Theory Pen Test- 3/2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) - -### Rancher CVEs and Resolutions - -Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](./cve) diff --git a/content/rancher/v2.x/en/security/cve/_index.md b/content/rancher/v2.x/en/security/cve/_index.md deleted file mode 100644 index 81c0b71ce..000000000 --- a/content/rancher/v2.x/en/security/cve/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Rancher CVEs and Resolutions -weight: 300 ---- - -Rancher is committed to informing the community of security issues in our products. Rancher will publish CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. - -| ID | Description | Date | Resolution | -|----|-------------|------|------------| -| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. 
This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server, i.e. local server, and return the requested information. You are vulnerable if you are running any Rancher 2.x version. Only valid Rancher users who have some level of permission on the cluster can perform the request. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher where users were granted access to resources regardless of the resource's API group. For example Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. You are vulnerable if you are running any Rancher 2.x version. The extent of the exploit increases if there are other matching CRD resources installed in the cluster. There is no direct mitigation besides upgrading to the patched versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud credential ID that was valid for a given cloud provider could make requests against that cloud provider's API through the proxy API, and the cloud credential would be attached. You are vulnerable if you are running any Rancher 2.2.0 or above and use cloud credentials. The exploit is limited to valid Rancher users. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.x/en/upgrades/rollbacks/). | -| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. 
| 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | -| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | -| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. 
Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. | 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.1/_index.md b/content/rancher/v2.x/en/security/rancher-2.1/_index.md deleted file mode 100644 index 31ca2f58b..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.1/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher v2.1 -weight: 5 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.1) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.1 | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes 1.11 | Benchmark 1.3.0 - -### Hardening Guide - -This hardening [guide](./hardening-2.1) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 diff --git a/content/rancher/v2.x/en/security/rancher-2.1/benchmark-2.1/_index.md b/content/rancher/v2.x/en/security/rancher-2.1/benchmark-2.1/_index.md deleted file mode 100644 index 84112b8af..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.1/benchmark-2.1/_index.md +++ /dev/null @@ -1,1767 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide v2.1 -weight: 209 -aliases: - - /rancher/v2.x/en/security/benchmark-2.1 ---- - -This document is a companion to the Rancher v2.1 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
- -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.1 | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes 1.11 | Benchmark 1.3.0 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.3.0. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Benchmark_Assessment.pdf) - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. - -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` command to provide human-readable formatting. - -Tests will have an exit code of zero on success and non-zero on failure. - -#### Known Scored Control Failures - -The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. 
- -- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) -- 1.3.6 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) -- 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more-restrictive (Scored) -- 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) -- 2.1.3 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.9 - Ensure that the `--hostname-override` argument is not set (Scored) -- 2.1.13 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) -- 2.1.14 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -### Controls - ---- - -## 1 - Master Node Security Configuration - -### 1.1 - API Server - -#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Notes** - -Flag not set or `--insecure-bind-address=127.0.0.1`. 
RKE sets this flag to `--insecure-bind-address=127.0.0.1` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' -``` - -**Returned Value:** `--insecure-port=0` - -**Result:** Pass - -#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' -``` - -**Returned Value:** `--secure-port=6443` - -**Result:** Pass - -#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--repair-malformed-updates=false").string' -``` - -**Returned Value:** `--repair-malformed-updates=false` - -**Result:** Pass - -#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' -``` - -**Returned Value:** `AlwaysPullImages` - -**Result:** Pass - -#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' -``` - -**Returned Value:** `DenyEscalatingExec` - -**Result:** Pass - -#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Scored) - -**Notes** - -This control may be out of date. This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the Kubernetes 1.11 documentation: - -> This should be enabled if a cluster doesn’t utilize pod security policies to restrict the set of values a security context can take. - -Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. 
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Document - -#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' -``` - -**Returned Value:** `NamespaceLifecycle` - -**Result:** Pass - -#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) - -**Notes** - -This path is the path inside of the container. It's combined with the RKE `cluster.yml` `extra-binds:` option to map the audit log to the host filesystem. - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' -``` - -**Returned Value:** `--audit-log-log=/var/log/kube-audit/audit-log.json` - -**Result:** Pass - -#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxage=5` - -**Result:** Pass - -#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxbackup=5` - -**Result:** Pass - -#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxsize=100` - -**Result:** Pass - -#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Notes** - -RKE is using the kubelet's ability to automatically create self-signed certs. No CA cert is saved to verify the communication between `kube-apiserver` and `kubelet`. - -**Mitigation** - -Make sure nodes with `role:controlplane` are on the same local network as your nodes with `role:worker`. 
Use network ACLs to restrict connections to the kubelet port (10250/tcp) on worker nodes, only permitting it from controlplane nodes. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' -``` - -**Returned Value:** none - -**Result:** Fail (See Mitigation) - -#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Audit** (`--kubelet-client-certificate`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' -``` - -**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--kubelet-client-key`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' -``` - -**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' -``` - -**Returned Value:** `--service-account-lookup=true` - -**Result:** Pass - -#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' -``` - -**Returned Value:** `PodSecurityPolicy` - -**Result:** Pass - -#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' -``` - -**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Audit** (`--etcd-certfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' -``` - -**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` - -**Audit** (`--etcd-keyfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' -``` - -**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` - -**Result:** Pass - -#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' -``` - -**Returned Value:** `ServiceAccount` - -**Result:** Pass - -#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--tls-key-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker 
inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` -**Result:** Pass - -#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** The return should be blank. - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** The return should be blank. - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** The return should be blank. - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' -``` - -**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.32 - Ensure that the `--authorization-mode` argument is set to Node (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBA` -**Result:** Pass - -#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' -``` - -**Returned Value:** `NodeRestriction` - -**Result:** Pass - -#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e 
'.[0].Args[] | match("--experimental-encryption-provider-config=.*").string' -``` - -**Returned Value:** `--experimental-encryption-provider-config=/etc/kubernetes/encryption.yaml` - -**Result:** Pass - -#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) - -**Notes** - -Only the first provider in the list is active. - -**Audit** - -``` bash -grep -A 1 providers: /etc/kubernetes/encryption.yaml | grep aescbc -``` - -**Returned Value:** `- aescbc:` - -**Result:** Pass - -#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Notes** - -The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: - -- `/etc/kubernetes/admission.yaml` -- `/etc/kubernetes/event.yaml` - -See Host Configuration for details. - -**Audit** (Admissions plugin) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' -``` - -**Returned Value:** `EventRateLimit` - -**Audit** (`--admission-control-config-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' -``` - -**Returned Value:** `--admission-control-config-file=/etc/kubernetes/admission.yaml` - -**Result:** Pass - -#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) - -**Notes** - -`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. - -**Audit** (Feature Gate) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** (Audit Policy File) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' -``` - -**Returned Value:** `--audit-policy-file=/etc/kubernetes/audit.yaml` - -**Result:** Pass - -#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Notes** - -RKE uses the default value of 60s and doesn't set this option. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.39 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) - -**Notes** - -This appears to be a repeat of 1.1.30. 
- -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -### 1.2 - Scheduler - -#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` -**Result:** Pass - -#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` -**Result:** Pass - -### 1.3 - Controller Manager - -#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' -``` - -**Returned Value:** `--terminated-pod-gc-threshold=1000` -**Result:** Pass - -#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' -``` - -**Returned Value:** 
`--use-service-account-credentials=true` - -**Result:** Pass - -#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' -``` - -**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' -``` - -**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) - -**Notes** - -RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Fail - -#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.4 - Configuration Files - -#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. 
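-
-Because there is no pod specification file to inspect, the effective scheduler configuration can be reviewed directly from the container arguments; this supplementary check is not part of the benchmark itself:
-
-``` bash
-# Lists every argument passed to the kube-scheduler container at run time.
-docker inspect kube-scheduler | jq -r '.[0].Args[]'
-```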
- -**Result:** Pass (Not Applicable) - -#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -``` bash -ls -l /var/lib/cni/networks/k8s-pod-network/ -``` - -**Returned Value:** - -``` bash --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.2 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.3 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.4 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.5 --rw-r--r-- 1 root root 10 Nov 7 16:25 last_reserved_ip.0 -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -ls -l /etc/cni/net.d/ -``` - -**Returned Value:** - -``` bash --rw-r--r-- 1 root root 1474 Nov 6 20:05 10-calico.conflist --rw------- 1 root root 2529 Nov 6 20:05 calico-kubeconfig -``` - -**Result:** Pass - -#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -``` bash -ls -l /var/lib/cni/networks/k8s-pod-network/ -``` - -**Returned Value:** - -``` bash --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.2 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.3 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.4 --rw-r--r-- 1 root root 64 Nov 6 20:05 10.42.0.5 --rw-r--r-- 1 root root 10 Nov 7 16:25 last_reserved_ip.0 -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -ls -l /etc/cni/net.d/ -``` - -**Returned Value:** - -``` bash --rw-r--r-- 1 root root 1474 Nov 6 20:05 10-calico.conflist --rw------- 1 root root 2529 Nov 6 20:05 calico-kubeconfig -``` - -**Result:** Pass - -#### 1.4.11 - Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored) - -**Notes** - -Files underneath the data dir are permissioned `700` - -``` bash -ls -al /var/lib/etcd -total 12 -drwxr-xr-x 3 root root 4096 Nov 14 17:06 . -drwxr-xr-x 47 root root 4096 Nov 14 17:06 .. -drwx------ 4 root root 4096 Nov 14 17:06 member -``` - -**Audit** - -``` bash -stat -c %a /var/lib/etcd -``` - -**Returned Value:** `755` - -**Result:** Fail - -#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) - -**Notes** - -The `etcd` container runs as the `root` user. The data directory and files are owned by `root`. - -**Audit** - -``` bash -stat -c %U:%G /var/lib/etcd -``` - -**Returned Value:** `root:root` - -**Result:** Fail - -#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It's presented to user where RKE is run. We recommend that this `kube_config_cluster.yml` file be kept in secure store. 
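-
-Although the control is not applicable on the nodes, you may still want to check the kubeconfig file on the workstation where `rke` was run; the path below assumes the file was left in the directory where `rke up` was executed:
-
-``` bash
-# Run on the workstation that holds kube_config_cluster.yml, not on a node.
-# Aim for permissions of 644 or more restrictive, owned by the operating user.
-stat -c %a kube_config_cluster.yml
-stat -c %U:%G kube_config_cluster.yml
-```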
- -**Result:** Pass (Not Applicable) - -#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) - -**Notes** - -RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in secure store. - -**Result:** Pass (Not Applicable) - -#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -### 1.5 - etcd - -#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Audit** `(--cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' -``` - -**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` - -**Audit** (`--key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' -``` - -**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` - -**Result:** Pass - -#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--client-cert-auth` - -**Result:** Pass - -#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--peer-cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' -``` - -**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` - -**Audit** (`--peer-key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' -``` - -**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` - -**Result:** Pass - -#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. 
- -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--client-cert-auth` - -**Result:** Pass - -#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) - -**Notes** - -RKE does not currently implement a separate CA for etcd certificates. - -`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Fail - -#### 1.6 - General Security Primitives - -These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone. - -#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored) - - -Rancher has built in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. - -Rancher has the ability integrate with external authentication sources (LDAP, SAML, AD…) allows easy access with unique credentials to your existing users or groups. - -#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) - -With Rancher, users or groups can be assigned access to all clusters, a single cluster or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. - -#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. - -See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. - -#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) - -Since this requires the enabling of AllAlpha feature gates we would not recommend enabling this feature at the moment. - -#### 1.6.5 - Apply security context to your pods and containers (Not Scored) - -This practice does go against control 1.1.13, but we prefer using `PodSecurityPolicy` and allowing security context to be set over a blanket deny. - -Rancher allows users to set various Security Context options when launching pods via the GUI interface. - -#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) - -Image Policy Webhook requires a 3rd party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host configuration section for the admission.yaml file. - -#### 1.6.7 - Configure network policies as appropriate (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. - -See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. 
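-
-If you enable Rancher's project network isolation, a hedged way to confirm that NetworkPolicy objects were actually created is to list them across all namespaces (policy names and counts will vary by project):
-
-``` bash
-# Project network isolation should generally result in at least one
-# NetworkPolicy per isolated namespace.
-kubectl get networkpolicy --all-namespaces
-```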
- -#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) - -Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. - -With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. - - -#### 1.7 - Pod Security Policies (PSP) - -This RKE configuration has two Pod Security Policies. - -- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. -- `restricted`: This is the cluster default PSP and follows the best practices defined by controls in this section. - -#### 1.7.1 - Do not admit privileged containers (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.privileged}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostPID}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostIPC}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostNetwork}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy (PSP) is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" -``` - -**Returned Value:** `[NET_RAW]` - -**Result:** Pass - -## 2 - Worker Node Security Configuration - -### 2.1 - Kubelet - -#### 2.1.1 - Ensure that the `--allow-privileged` argument is set to false (Scored) - -**Notes** - -The `--allow-privileged` argument is deprecated from Kubernetes v1.11, and the default setting is `true` with the intention that users should use `PodSecurityPolicy` settings to allow or prevent privileged containers. 
- -Our RKE configuration uses `PodSecurityPolicy` with a default policy to reject privileged containers. - -**Result:** Pass (Not Applicable) - -#### 2.1.2 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 2.1.3 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Notes** - -RKE currently runs the kubelet without the `--authorization-mode` flag. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' -``` - -**Returned Value:** `null` - -**Result:** Fail - -#### 2.1.4 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 2.1.5 - Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' -``` - -**Returned Value:** `--read-only-port=0` - -**Result:** Pass - -#### 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' -``` - -**Returned Value:** `--streaming-connection-idle-timeout=1800s` - -**Result:** Pass - -#### 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' -``` - -**Returned Value:** `--protect-kernel-defaults=true` - -**Result:** Pass - -#### 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' -``` - -**Returned Value:** `--make-iptables-util-chains=true` - -**Result:** Pass - -#### 2.1.9 - Ensure that the `--hostname-override` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' -``` - -**Returned Value:** `--hostname-override=` - -**Result:** Fail - -#### 2.1.10 - Ensure that the `--event-qps` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' -``` - -**Returned Value:** `--event-qps=0` - -**Result:** Pass - -#### 2.1.11 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Notes** - -RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
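-
-As a supplementary check (not part of the benchmark), you can confirm on a worker node that the kubelet's self-generated certificates are present in that default directory:
-
-``` bash
-# The kubelet's self-signed serving certificate and key are expected here
-# when --tls-cert-file and --tls-private-key-file are not set.
-ls -l /var/lib/kubelet/pki
-```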
- -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `null` - -**Audit** (`--tls-private-key-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.12 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' -``` - -**Returned Value:** `--cadvisor-port=0` - -**Result:** Pass - -#### 2.1.13 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Notes** - -RKE will enable certificate rotation in version 0.1.12. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' -``` - -**Returned Value:** `null` - -**Result:** Fail - -#### 2.1.14 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Notes** - -RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Fail - -#### 2.1.15 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` 
- -**Returned Value:** `null` - -**Result:** Pass - -### 2.2 - Configuration Files - -#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.2 - Ensure that the `kubelet.conf` file ownership is set to `root:root` (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - - -#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/rancher-2.1/hardening-2.1/_index.md b/content/rancher/v2.x/en/security/rancher-2.1/hardening-2.1/_index.md deleted file mode 100644 index 32ba7390a..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.1/hardening-2.1/_index.md +++ /dev/null @@ -1,1176 +0,0 @@ ---- -title: Hardening Guide v2.1 -weight: 104 -aliases: - - /rancher/v2.x/en/security/hardening-2.1 ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
- -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) - -For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{}}/rancher/v2.x/en/security/benchmark-2.1/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -kernel.panic=10 -kernel.panic_on_oops=1 -``` - -- Run `sysctl -p` to enable the settings. - -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. 
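- -Once the encryption configuration is in place and the cluster has been reconfigured (see the Remediation below), a quick spot check can confirm that new secrets really are stored encrypted in etcd. The sketch below is illustrative only and is not part of the benchmark audit: it assumes `kubectl` access to the cluster, that the RKE `etcd` container is present on an etcd node with `etcdctl` and its TLS environment preconfigured, and `encryption-canary` is simply a throwaway secret name. - -``` bash -# Create a throwaway secret, then read its raw value directly from etcd. -kubectl create secret generic encryption-canary -n default --from-literal=foo=bar -docker exec etcd etcdctl get /registry/secrets/default/encryption-canary | hexdump -C | head -# With the aescbc provider active the stored value starts with "k8s:enc:aescbc:v1:" -# rather than containing the plaintext "foo". Clean up the test secret afterwards: -kubectl delete secret encryption-canary -n default -```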
- -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /etc/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: -- resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. - -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /etc/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/encryption.yaml -chmod 0600 /etc/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. - -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /etc/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /etc/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/audit.yaml -chmod 0600 /etc/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. 
The settings below are intended as an initial value and may need to be adjusted for larger clusters. - -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /etc/kubernetes/admission.yaml -stat /etc/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /etc/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 500 - burst: 5000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /etc/kubernetes/admission.yaml -touch /etc/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/admission.yaml -chown root:root /etc/kubernetes/event.yaml -chmod 0600 /etc/kubernetes/admission.yaml -chmod 0600 /etc/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /etc/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 500 - burst: 5000 -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix A. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) -- 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---repair-malformed-updates=false ---service-account-lookup=true ---enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---experimental-encryption-provider-config=/etc/kubernetes/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit.yaml -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - repair-malformed-updates: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.3 - Configure 
scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. - -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive PodSecurityPolicy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. 
- -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. - -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - 
kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. - -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- Upgrade to Rancher v2.1.2 via the Helm chart. While performing the upgrade, provide the following installation flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. 
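- -The audit step above can also be scripted. The following sketch is not part of the original guide: it uses Rancher's public local-auth login endpoint to confirm that the default credentials are rejected, and `RANCHER_URL` is a placeholder for your Rancher server URL. - -``` bash -#!/bin/bash -# Attempt to log in with the default admin/admin credentials. A successful login -# normally returns HTTP 201 with a token; a hardened installation should reject it. -RANCHER_URL="https://rancher.example.com" -code=$(curl -sk -o /dev/null -w '%{http_code}' -X POST -H 'Content-Type: application/json' --data '{"username":"admin","password":"admin"}' "$RANCHER_URL/v3-public/localProviders/local?action=login") -if [ "$code" = "201" ]; then echo "FAIL: default credentials were accepted (HTTP $code)"; else echo "PASS: default credentials rejected (HTTP $code)"; fi -```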
- -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. - -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. - -**Audit** - -- In the Rancher UI select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
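- -As with the administrator review above, this check can be scripted against the Rancher API. The sketch below is an assumption-laden example rather than part of the original guide: it presumes an API bearer token with global visibility, and that the `nodeDrivers` collection exposes `name` and `active` fields; `RANCHER_URL` and `TOKEN` are placeholders. - -``` bash -# List node drivers that are currently active so they can be compared against -# the organization's approved list. -curl -sk -H "Authorization: Bearer $TOKEN" "$RANCHER_URL/v3/nodeDrivers" | jq -r '.data[] | select(.active == true) | .name' -```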
- ---- - -## Appendix A - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - repair-malformed-updates: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: 
ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.2/_index.md b/content/rancher/v2.x/en/security/rancher-2.2/_index.md deleted file mode 100644 index 457ecb447..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.2/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher v2.2 -weight: 4 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.2) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes 1.13 | Benchmark v1.4.0 and v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.2) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.2/benchmark-2.2/_index.md b/content/rancher/v2.x/en/security/rancher-2.2/benchmark-2.2/_index.md deleted file mode 100644 index 7662c1d25..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.2/benchmark-2.2/_index.md +++ /dev/null @@ -1,1798 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide v2.2 -weight: 208 -aliases: - - /rancher/v2.x/en/security/benchmark-2.2 ---- - -This document is a companion to the Rancher v2.2 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes 1.13 | Benchmark v1.4.0 and v1.4.1 - -### CIS Kubernetes Benchmark 1.4.0 - Rancher 2.2.x with Kubernetes 1.13 -There is no material difference in control verification checks between CIS Kubernetes Benchmark 1.4.0 and 1.4.1. - -### CIS Kubernetes Benchmark 1.4.1 - Rancher 2.2.x with Kubernetes 1.13 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Benchmark_Assessment.pdf) - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. 
This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.0. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. - -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` command to provide human-readable formatting. - -#### Known Scored Control Failures - -The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. - -- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) -- 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more-restrictive (Scored) -- 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) -- 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) - -### Controls - ---- - -## 1 - Master Node Security Configuration - -### 1.1 - API Server - -#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Notes** - -Flag not set or `--insecure-bind-address=127.0.0.1`. 
RKE sets this flag to `--insecure-bind-address=127.0.0.1` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' -``` - -**Returned Value:** `--insecure-port=0` - -**Result:** Pass - -#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' -``` - -**Returned Value:** `--secure-port=6443` - -**Result:** Pass - -#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) - -**Note:** This deprecated flag was removed in 1.14, so it cannot be set. - -**Result:** Pass - -#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' -``` - -**Returned Value:** `AlwaysPullImages` - -**Result:** Pass - -#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' -``` - -**Returned Value:** `DenyEscalatingExec` - -**Result:** Pass - -#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) - -**Notes** - -This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: - -> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies - -Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Document - -#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' -``` - -**Returned Value:** `NamespaceLifecycle` - -**Result:** Pass - -#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) - -**Notes** - -This path is the path inside of the container. 
It's combined with the RKE `cluster.yml` `extra_binds:` option to map the audit log to the host filesystem. - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' -``` - -**Returned Value:** `--audit-log-path=/var/log/kube-audit/audit-log.json` - -**Result:** Pass - -#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxage=5` - -**Result:** Pass - -#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxbackup=5` - -**Result:** Pass - -#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxsize=100` - -**Result:** Pass - -#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Notes** - -RKE uses the kubelet's ability to automatically create self-signed certs. No CA cert is saved to verify the communication between `kube-apiserver` and `kubelet`. - -**Mitigation** - -Make sure nodes with `role:controlplane` are on the same local network as your nodes with `role:worker`. Use network ACLs to restrict connections to the kubelet port (10250/tcp) on worker nodes, only permitting it from controlplane nodes.
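- -A minimal sketch of such an ACL using iptables on a worker node is shown below; `172.31.24.0/24` is a placeholder for the controlplane subnet and should be replaced with your own, and the rules need to be persisted with your distribution's usual mechanism. - -``` bash -# Allow kubelet API access only from the controlplane network, drop everything else. -iptables -A INPUT -p tcp --dport 10250 -s 172.31.24.0/24 -j ACCEPT -iptables -A INPUT -p tcp --dport 10250 -j DROP -```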
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' -``` - -**Returned Value:** none - -**Result:** Fail (See Mitigation) - -#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Audit** (`--kubelet-client-certificate`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' -``` - -**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--kubelet-client-key`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' -``` - -**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' -``` - -**Returned Value:** `--service-account-lookup=true` - -**Result:** Pass - -#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' -``` - -**Returned Value:** `PodSecurityPolicy` - -**Result:** Pass - -#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' -``` - -**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Audit** (`--etcd-certfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' -``` - -**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` - -**Audit** (`--etcd-keyfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' -``` - -**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` - -**Result:** Pass - -#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' -``` - -**Returned Value:** `ServiceAccount` - -**Result:** Pass - -#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--tls-key-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** 
`--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` -**Result:** Pass - -#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' -``` - -**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` -**Result:** Pass - -#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' -``` - -**Returned Value:** `NodeRestriction` - -**Result:** Pass - -#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) - -**Notes** -In Kubernetes 1.13.x this flag is `--encryption-provider-config` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | 
match("--encryption-provider-config=.*").string' -``` - -**Returned Value:** `encryption-provider-config=/opt/kubernetes/encryption.yaml` - -**Result:** Pass - -#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) - -**Notes** - -Only the first provider in the list is active. - -**Audit** - -``` bash -grep -A 1 providers: /opt/kubernetes/encryption.yaml | grep aescbc -``` - -**Returned Value:** `- aescbc:` - -**Result:** Pass - -#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Notes** - -The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: - -- `/opt/kubernetes/admission.yaml` -- `/opt/kubernetes/event.yaml` - -See Host Configuration for details. - -**Audit** (Admissions plugin) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' -``` - -**Returned Value:** `EventRateLimit` - -**Audit** (`--admission-control-config-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' -``` - -**Returned Value:** `--admission-control-config-file=/opt/kubernetes/admission.yaml` - -**Result:** Pass - -#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) - -**Notes** - -`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. - -**Audit** (Feature Gate) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** (Audit Policy File) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' -``` - -**Returned Value:** `--audit-policy-file=/opt/kubernetes/audit.yaml` - -**Result:** Pass - -#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Notes** - -RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. 
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### Ensure that the --authorization-mode argument includes RBAC (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' -``` - -**Returned Value:** `"--authorization-mode=Node,RBAC"` - -**Result:** Pass - -### 1.2 - Scheduler - -#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` -**Result:** Pass - -#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` -**Result:** Pass - -### 1.3 - Controller Manager - -#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' -``` - -**Returned Value:** `--terminated-pod-gc-threshold=1000` - -**Result:** Pass - -#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' -``` - -**Returned Value:** `--use-service-account-credentials=true` - -**Result:** Pass - -#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' -``` - -**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' -``` - -**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) - -**Notes** - -RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. 
- -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `RotateKubeletServerCertificate=true` - -**Result:** Pass - -#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.4 - Configuration Files - -#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -**Note** -This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. 
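If the lock file gets in the way of a clean reading, the same permissions can be listed while skipping it; this is only a convenience variant of the audit command that follows:

``` bash
# Same permission listing as the audit below, but ignoring the CNI lock file.
find /var/lib/cni/networks/k8s-pod-network -type f ! -name lock \
  -exec stat -c "%n - %a" {} \;
```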
``` bash
stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/*
```

**Returned Value:**

``` bash
/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644
/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644
/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644
/var/lib/cni/networks/k8s-pod-network/lock - 750
```

**Audit** (`/etc/cni/net.d`)

``` bash
stat -c "%n - %a" /etc/cni/net.d/*
```

**Returned Value:**

``` bash
/etc/cni/net.d/10-canal.conflist - 664
/etc/cni/net.d/calico-kubeconfig - 600
```

**Result:** Pass

#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored)

**Notes**

This is a manual check.

**Audit** (`/var/lib/cni/networks/k8s-pod-network`)

``` bash
stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/*
```

**Returned Value:**

``` bash
/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root
/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root
/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root
/var/lib/cni/networks/k8s-pod-network/lock - root:root
```

**Audit** (`/etc/cni/net.d`)

``` bash
stat -c "%n - %U:%G" /etc/cni/net.d/*
```

**Returned Value:**

``` bash
/etc/cni/net.d/10-canal.conflist - root:root
/etc/cni/net.d/calico-kubeconfig - root:root
```

**Result:** Pass

#### 1.4.11 - Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)

**Notes**

Files underneath the data directory have their permissions set to `700`:

``` bash
stat -c "%n - %a" /var/lib/etcd/*

/var/lib/etcd/member - 700
```

**Audit**

``` bash
stat -c %a /var/lib/etcd
```

**Returned Value:** `755`

**Result:** Fail

#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored)

**Notes**

The `etcd` container runs as the `root` user. The data directory and files are owned by `root`.

**Audit**

``` bash
stat -c %U:%G /var/lib/etcd
```

**Returned Value:** `root:root`

**Result:** Fail

#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored)

**Notes**

RKE does not store the default Kubernetes kubeconfig credentials file on the nodes. It is presented to the user on the machine where RKE is run. We recommend that this `kube_config_cluster.yml` file be kept in a secure location.

**Result:** Pass (Not Applicable)

#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored)

**Notes**

RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in a secure location.
- -**Result:** Pass (Not Applicable) - -#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) - -**Audit** - -``` bash -ls -laR /etc/kubernetes/ssl/ |grep -v yaml - -``` - -**Returned Value:** -``` bash -total 128 -drwxr-xr-x 2 root root 4096 Jul 1 19:53 . -drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. --rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem --rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem --rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem --rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem --rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem --rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem --rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem --rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem -``` - -**Result:** Pass - -#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 644 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 644 -/etc/kubernetes/ssl/kube-apiserver.pem - 644 -/etc/kubernetes/ssl/kube-ca.pem - 644 -/etc/kubernetes/ssl/kube-controller-manager.pem - 644 
-/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 644 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 644 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 644 -/etc/kubernetes/ssl/kube-node.pem - 644 -/etc/kubernetes/ssl/kube-proxy.pem - 644 -/etc/kubernetes/ssl/kube-scheduler.pem - 644 -/etc/kubernetes/ssl/kube-service-account-token.pem - 644 -``` - -**Result:** Pass - -#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*key* - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 -/etc/kubernetes/ssl/kube-node-key.pem - 600 -/etc/kubernetes/ssl/kube-proxy-key.pem - 600 -/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 -/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 -``` - -**Result:** Pass - -### 1.5 - etcd - -#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Audit** `(--cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` - -**Audit** (`--key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` - -**Result:** Pass - -#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--client-cert-auth` - -**Result:** Pass - -#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--peer-cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` - -**Audit** (`--peer-key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. 
**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem`

**Result:** Pass

#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored)

**Notes**

Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`.

**Audit**

``` bash
docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string'
```

**Returned Value:** `--peer-client-cert-auth`

**Result:** Pass

#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored)

**Audit**

``` bash
docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string'
```

**Returned Value:** `null`

**Result:** Pass

#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored)

**Mitigation**

RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own discrete CA.

**Notes**

`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`.

**Audit**

``` bash
docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string'
```

**Returned Value:** `null`

**Result:** Pass (See Mitigation)

### 1.6 - General Security Primitives

These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone.

#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored)

Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters.

Rancher can also integrate with external authentication sources (LDAP, SAML, AD, and so on), which gives your existing users and groups easy access with unique credentials.

#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored)

With Rancher, users or groups can be assigned access to all clusters, a single cluster, or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources.

#### 1.6.3 - Create network segmentation using Network Policies (Not Scored)

Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster.

See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation.

#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored)

Because this requires enabling the `AllAlpha` feature gate, we do not recommend enabling this feature at this time.

#### 1.6.5 - Apply security context to your pods and containers (Not Scored)

This practice does go against control 1.1.13, but we prefer using a PodSecurityPolicy and allowing security context to be set over a blanket deny.

Rancher allows users to set various Security Context options when launching pods through the Rancher UI.

#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored)

Image Policy Webhook requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host Configuration section for the `admission.yaml` file.
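When deciding whether to add `ImagePolicyWebhook` (or any other plugin), it helps to see what is already enabled on the API server. A small helper that reuses the inspection pattern from section 1.1:

``` bash
# List the admission plugins currently enabled on kube-apiserver, one per line.
docker inspect kube-apiserver \
  | jq -r '.[0].Args[] | select(startswith("--enable-admission-plugins="))' \
  | sed 's/^--enable-admission-plugins=//' \
  | tr ',' '\n'
```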
- -#### 1.6.7 - Configure network policies as appropriate (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. - -See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. - -#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) - -Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. - -With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. - - -#### 1.7 - Pod Security Policies (PSP) - -This RKE configuration has two Pod Security Policies. - -- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. -- `restricted`: This is the cluster default PSP and follows the best practices defined by controls in this section. - -#### 1.7.1 - Do not admit privileged containers (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.privileged}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostPID}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostIPC}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostNetwork}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. 
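Because controls 1.7.1 through 1.7.7 all inspect fields of the same `restricted` policy, dumping the whole spec once can make the review easier; the per-field audit for this control follows.

``` bash
# Print the full restricted PSP spec so the 1.7.x fields can be reviewed together.
kubectl get psp restricted -o yaml
```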
- -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" -``` - -**Returned Value:** `[NET_RAW]` - -**Result:** Pass - -## 2 - Worker Node Security Configuration - -### 2.1 - Kubelet - -#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' -``` - -**Returned Value:** `--authorization-mode=Webhook` - -**Result:** Pass - -#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' -``` - -**Returned Value:** `--read-only-port=0` - -**Result:** Pass - -#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' -``` - -**Returned Value:** `--streaming-connection-idle-timeout=1800s` - -**Result:** Pass - -#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' -``` - -**Returned Value:** `--protect-kernel-defaults=true` - -**Result:** Pass - -#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' -``` - -**Returned Value:** `--make-iptables-util-chains=true` - -**Result:** Pass - -#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) - -**Notes** -This is used by most cloud providers. Not setting this is not practical in most cases. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' -``` - -**Returned Value:** `--hostname-override=` - -**Result:** Fail - -#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' -``` - -**Returned Value:** `--event-qps=0` - -**Result:** Pass - -#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Notes** - -RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
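To look at those self-generated certificates directly, something like the following can be used. This assumes the default `/var/lib/kubelet/pki` path from the note above, that the serving certificate is named `kubelet.crt`, and that `openssl` is installed on the node:

``` bash
# Inspect the kubelet's self-generated serving certificate.
ls -l /var/lib/kubelet/pki/
openssl x509 -in /var/lib/kubelet/pki/kubelet.crt -noout -subject -issuer -dates
```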
- -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `null` - -**Audit** (`--tls-private-key-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Notes** - -RKE handles certificate rotation through an external process. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' -``` - -**Returned Value:** `null` - -**Result:** Pass (Not Applicable) - -#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `RotateKubeletServerCertificate=true` - -**Result:** Pass - -#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -### 2.2 - Configuration Files - -#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - - -#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/rancher-2.2/hardening-2.2/_index.md b/content/rancher/v2.x/en/security/rancher-2.2/hardening-2.2/_index.md deleted file mode 100644 index 8de41eee9..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.2/hardening-2.2/_index.md +++ /dev/null @@ -1,1230 +0,0 @@ ---- -title: Hardening Guide v2.2 -weight: 103 -aliases: - - /rancher/v2.x/en/security/hardening-2.2 ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). 
- -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{}}/rancher/v2.x/en/security/benchmark-2.2/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -kernel.panic=10 -kernel.panic_on_oops=1 -``` - -- Run `sysctl -p` to enable the settings. - -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. 
- -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /opt/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. - -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /opt/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/encryption.yaml -chmod 0600 /opt/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. - -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /opt/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/audit.yaml -chmod 0600 /opt/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. 
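As a quick way to confirm the three properties above in one pass, the individual checks can be combined. This is a convenience wrapper, not a replacement for the audit steps:

``` bash
# Spot-check presence, mode, ownership, and contents of the audit policy file.
f=/opt/kubernetes/audit.yaml
stat -c "%n %a %U:%G" "$f"      # expect: /opt/kubernetes/audit.yaml 600 root:root
grep -E 'level: Metadata' "$f"  # expect the Metadata rule from the policy above
```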
- -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. - -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /opt/kubernetes/admission.yaml -stat /opt/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/admission.yaml -touch /opt/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/admission.yaml -chown root:root /opt/kubernetes/event.yaml -chmod 0600 /opt/kubernetes/admission.yaml -chmod 0600 /opt/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix A. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
- -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - authorization-mode: "Webhook" - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---encryption-provider-config=/opt/kubernetes/encryption.yaml ---admission-control-config-file=/opt/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/opt/kubernetes/audit.yaml ---tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: 
"json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - - "/opt/kubernetes:/opt/kubernetes" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.2.x, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
- -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges. Replace `token-<id>:<secret>` with a Rancher API bearer token and `<RANCHER_URL>` with the URL of your Rancher server: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-<id>:<secret>' https://<RANCHER_URL>/v3/users | jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-<id>:<secret>' "$i" | jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This prevents users from using Rancher to provision nodes with those providers. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it.
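Tying together the Rancher server controls in sections 3.1.1 and 3.1.2 above, a single Helm upgrade can apply both the local-cluster and audit-log settings. The following is only a sketch: the release name `rancher`, the `rancher-stable` repository, and the `auditLog.*` chart values are assumptions based on a standard Helm install of Rancher, so adjust them to your environment and your chosen audit level and destination.

``` bash
# Sketch: re-run the Helm upgrade with the hardening-related chart values.
# --reuse-values keeps the options (hostname, TLS, and so on) from the original install.
helm upgrade rancher rancher-stable/rancher \
  --namespace cattle-system \
  --reuse-values \
  --set addLocal="false" \
  --set auditLog.level=1 \
  --set auditLog.destination=sidecar
```

After the upgrade completes, re-run the audits in 3.1.1 and 3.1.2 to confirm that `--add-local=false` and the `auditLog` parameters are present on the Rancher deployment.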
- ---- - -## Appendix A - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - authorization-mode: "Webhook" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - - "/opt/kubernetes:/opt/kubernetes" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -addons: | - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - 
metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/_index.md deleted file mode 100644 index 0f3f04da6..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Rancher v2.3.x -weight: 3 ---- - -The relevant Hardening Guide and Self Assessment guide depends on your Rancher version: - -- [Rancher v2.3.5](./rancher-v2.3.5) -- [Rancher v2.3.3](./rancher-v2.3.3) -- [Rancher v2.3.0](./rancher-v2.3.0) \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md deleted file mode 100644 index aa31c9c9a..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher v2.3.0 -weight: 3 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes 1.15 | Benchmark v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md deleted file mode 100644 index 1b7056339..000000000 --- 
a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md +++ /dev/null @@ -1,1770 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide v2.3 -weight: 207 -aliases: - - /rancher/v2.x/en/security/benchmark-2.3 ---- - -This document is a companion to the Rancher v2.3 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes 1.15 | Benchmark v1.4.1 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Benchmark_Assessment.pdf) - -> The CIS Benchmark version v1.4.1 covers the security posture of Kubernetes 1.13 clusters. This self-assessment has been run against Kubernetes 1.15, using the guidelines outlined in the CIS v1.4.1 benchmark. Updates to the CIS benchmarks will be applied to this document as they are released. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors, and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.1. You can download the benchmark after logging in to [CISecurity.org](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Because of this, some audit commands differ from those in the original CIS Benchmark. Where they differ, the commands specific to Rancher Labs are provided for testing. - -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the `jq` command to provide human-readable formatting.
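Because every Kubernetes service runs as a container, most of the audits that follow reduce to the same pattern: inspect the service's container and match the argument under test with `jq`. The example below is only an arbitrary illustration of that pattern (it reuses the `--profiling` check that appears later in this guide); the `-e` flag makes `jq` exit non-zero when no match is found, which is convenient if you script the checks.

``` bash
# Generic audit pattern: read the container's arguments and match the flag under test.
# With `jq -e`, the exit status is non-zero when nothing matches, so it can drive a script.
docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' \
  && echo "check passed" || echo "check failed"
```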
- -### Controls - ---- - -## 1 - Master Node Security Configuration - -### 1.1 - API Server - -#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Notes** - -Flag not set or `--insecure-bind-address=127.0.0.1`. RKE sets this flag to `--insecure-bind-address=127.0.0.1` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' -``` - -**Returned Value:** `--insecure-port=0` - -**Result:** Pass - -#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' -``` - -**Returned Value:** `--secure-port=6443` - -**Result:** Pass - -#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) - -**Note:** This deprecated flag was removed in 1.14, so it cannot be set. 
- -**Result:** Not Applicable - -#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' -``` - -**Returned Value:** `AlwaysPullImages` - -**Result:** Pass - -#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' -``` - -**Returned Value:** `DenyEscalatingExec` - -**Result:** Pass - -#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) - -**Notes** - -This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: - -> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies - -Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Document - -#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' -``` - -**Returned Value:** `NamespaceLifecycle` - -**Result:** Pass - -#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) - -**Notes** - -This is the path inside the container. It's combined with the RKE `cluster.yml` `extra_binds:` option to map the audit log to the host filesystem. - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' -``` - -**Returned Value:** `--audit-log-path=/var/log/kube-audit/audit-log.json` - -**Result:** Pass - -#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxage=5` - -**Result:** Pass - -#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity.
Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxbackup=5` - -**Result:** Pass - -#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxsize=100` - -**Result:** Pass - -#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Notes** - -RKE is using the kubelet's ability to automatically create self-signed certs. No CA cert is saved to verify the communication between `kube-apiserver` and `kubelet`. - -**Mitigation** - -Make sure nodes with `role:controlplane` are on the same local network as your nodes with `role:worker`. Use network ACLs to restrict connections to the kubelet port (10250/tcp) on worker nodes, only permitting it from controlplane nodes. 
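As a concrete illustration of this mitigation, host firewall rules on each worker node can limit kubelet API access to the controlplane nodes. This is only a sketch with an assumed controlplane subnet; substitute your own addresses, and prefer your cloud provider's security groups or your existing firewall tooling where available.

``` bash
# Sketch: on a worker node, allow the kubelet API (10250/tcp) only from the
# controlplane subnet (assumed here to be 172.31.24.0/24) and drop other sources.
iptables -A INPUT -p tcp --dport 10250 -s 172.31.24.0/24 -j ACCEPT
iptables -A INPUT -p tcp --dport 10250 -j DROP
```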
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' -``` - -**Returned Value:** none - -**Result:** Pass - -#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Audit** (`--kubelet-client-certificate`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' -``` - -**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--kubelet-client-key`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' -``` - -**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.23 - Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' -``` - -**Returned Value:** `--service-account-lookup=true` - -**Result:** Pass - -#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' -``` - -**Returned Value:** `PodSecurityPolicy` - -**Result:** Pass - -#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' -``` - -**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Audit** (`--etcd-certfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' -``` - -**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` - -**Audit** (`--etcd-keyfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' -``` - -**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` - -**Result:** Pass - -#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' -``` - -**Returned Value:** `ServiceAccount` - -**Result:** Pass - -#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--tls-private-key-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:**
`--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' -``` - -**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' -``` - -**Returned Value:** `NodeRestriction` - -**Result:** Pass - -#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) - -**Notes** -In Kubernetes 1.15.x this flag is `--encryption-provider-config` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] 
| match("--encryption-provider-config=.*").string' -``` - -**Returned Value:** `encryption-provider-config=/opt/kubernetes/encryption.yaml` - -**Result:** Pass - -#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) - -**Notes** - -Only the first provider in the list is active. - -**Audit** - -``` bash -grep -A 1 providers: /opt/kubernetes/encryption.yaml | grep aescbc -``` - -**Returned Value:** `- aescbc:` - -**Result:** Pass - -#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Notes** - -The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: - -- `/opt/kubernetes/admission.yaml` -- `/opt/kubernetes/event.yaml` - -See Host Configuration for details. - -**Audit** (Admissions plugin) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' -``` - -**Returned Value:** `EventRateLimit` - -**Audit** (`--admission-control-config-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' -``` - -**Returned Value:** `--admission-control-config-file=/opt/kubernetes/admission.yaml` - -**Result:** Pass - -#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) - -**Notes** - -`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. - -**Audit** (Feature Gate) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** (Audit Policy File) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' -``` - -**Returned Value:** `--audit-policy-file=/opt/kubernetes/audit.yaml` - -**Result:** Pass - -#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Notes** - -RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. 
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### Ensure that the --authorization-mode argument includes RBAC (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' -``` - -**Returned Value:** `"--authorization-mode=Node,RBAC"` - -**Result:** Pass - -### 1.2 - Scheduler - -#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.3 - Controller Manager - -#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' -``` - -**Returned Value:** `--terminated-pod-gc-threshold=1000` - -**Result:** Pass - -#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' -``` - -**Returned Value:** `--use-service-account-credentials=true` - -**Result:** Pass - -#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' -``` - -**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' -``` - -**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) - -**Notes** - -RKE handles certificate rotation through an external process. - -**Result:** Not Applicable - -#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.4 - Configuration Files - -#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. 
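Since there is no pod specification file to check, the effective configuration can instead be reviewed directly from the running container; this is offered only as a convenience and is not part of the scored check.

``` bash
# List every argument passed to the kube-apiserver container, one per line.
docker inspect kube-apiserver | jq -r '.[0].Args[]'
```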
- -**Result:** Not Applicable - -#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -**Note** -This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. - -``` bash -stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/* -``` - -**Returned Value:** - -``` bash -/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644 -/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644 -/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644 -/var/lib/cni/networks/k8s-pod-network/lock - 750 -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -stat -c "%n - %a" /etc/cni/net.d/* -``` - -**Returned Value:** - -``` bash -/etc/cni/net.d/10-canal.conflist - 664 -/etc/cni/net.d/calico-kubeconfig - 600 -``` - -**Result:** Pass - -#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) - -**Notes** - -This is a manual check. 
- -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -``` bash -stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/* -``` - -**Returned Value:** - -``` bash -/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root -/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root -/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root -/var/lib/cni/networks/k8s-pod-network/lock - root:root -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -stat -c "%n - %U:%G" /etc/cni/net.d/* -``` - -**Returned Value:** - -``` bash -/etc/cni/net.d/10-canal.conflist - root:root -/etc/cni/net.d/calico-kubeconfig - root:root -``` - -**Result:** Pass - -#### 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Notes** - -Files underneath the data dir have permissions set to `700` - -``` bash -stat -c "%n - %a" /var/lib/rancher/etcd/* - -/var/lib/etcd/member - 700 -``` - -**Audit** - -``` bash -stat -c %a /var/lib/rancher/etcd -``` - -**Returned Value:** `700` - -**Result:** Pass - -#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) - -**Notes** - -The `etcd` container runs as the `etcd` user. The data directory and files are owned by `etcd`. - -**Audit** - -``` bash -stat -c %U:%G /var/lib/rancher/etcd -``` - -**Returned Value:** `etcd:etcd` - -**Result:** Pass - -#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It's presented to user where RKE is run. We recommend that this kube_config_cluster.yml file be kept in secure store. - -**Result:** Not Applicable - -#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) - -**Notes** - -RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in secure store. - -**Result:** Not Applicable - -#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) - -**Audit** - -``` bash -ls -laR /etc/kubernetes/ssl/ |grep -v yaml - -``` - -**Returned Value:** -``` bash -total 128 -drwxr-xr-x 2 root root 4096 Jul 1 19:53 . -drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. 
--rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem --rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem --rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem --rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem --rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem --rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem --rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem --rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem -``` - -**Result:** Pass - -#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 644 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 644 -/etc/kubernetes/ssl/kube-apiserver.pem - 644 -/etc/kubernetes/ssl/kube-ca.pem - 644 -/etc/kubernetes/ssl/kube-controller-manager.pem - 644 -/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 644 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 644 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 644 -/etc/kubernetes/ssl/kube-node.pem - 644 -/etc/kubernetes/ssl/kube-proxy.pem - 644 -/etc/kubernetes/ssl/kube-scheduler.pem - 644 -/etc/kubernetes/ssl/kube-service-account-token.pem - 644 -``` - -**Result:** Pass - -#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*key* - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 -/etc/kubernetes/ssl/kube-node-key.pem - 600 -/etc/kubernetes/ssl/kube-proxy-key.pem - 600 -/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 -/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 -``` - -**Result:** Pass - -### 1.5 - etcd - -#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Audit** 
(`--cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` - -**Audit** (`--key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` - -**Result:** Pass - -#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting `--client-cert-auth` is the equivalent of setting `--client-cert-auth=true`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--client-cert-auth` - -**Result:** Pass - -#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--peer-cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` - -**Audit** (`--peer-key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` - -**Result:** Pass - -#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--peer-client-cert-auth` - -**Result:** Pass - -#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) - -**Mitigation** - -RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own discrete CA. - -**Notes** - -`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass (See Mitigation) - -### 1.6 - General Security Primitives - -These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone.
- -#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored) - -Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. - -Rancher can integrate with external authentication sources (LDAP, SAML, AD, and others), which allows your existing users or groups easy access with unique credentials. - -#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) - -With Rancher, users or groups can be assigned access to all clusters, a single cluster or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. - -#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. - -See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. - -#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) - -Because this requires enabling the `AllAlpha` feature gate, we do not recommend enabling this feature at this time. - -#### 1.6.5 - Apply security context to your pods and containers (Not Scored) - -This practice does go against control 1.1.13, but we prefer using a PodSecurityPolicy and allowing security context to be set over a blanket deny. - -Rancher allows users to set various Security Context options when launching pods via the GUI. - -#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) - -Image Policy Webhook requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host Configuration section for the `admission.yaml` file. - -#### 1.6.7 - Configure network policies as appropriate (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. - -See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. - -#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) - -Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on these controls. - -With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. - -### 1.7 - Pod Security Policies (PSP) - -This RKE configuration has two Pod Security Policies. - -- `default-psp`: Assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. -- `restricted`: This is the cluster default PSP and follows the best practices defined by controls in this section. - -#### 1.7.1 - Do not admit privileged containers (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.privileged}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts.
- -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostPID}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostIPC}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.hostNetwork}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" -``` - -**Returned Value:** `[NET_RAW]` - -**Result:** Pass - -## 2 - Worker Node Security Configuration - -### 2.1 - Kubelet - -#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' -``` - -**Returned Value:** `--authorization-mode=Webhook` - -**Result:** Pass - -#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' -``` - -**Returned Value:** `--read-only-port=0` - -**Result:** Pass - -#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' -``` - -**Returned Value:** `--streaming-connection-idle-timeout=1800s` - -**Result:** Pass - -#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | 
match("--protect-kernel-defaults=true").string' -``` - -**Returned Value:** `--protect-kernel-defaults=true` - -**Result:** Pass - -#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' -``` - -**Returned Value:** `--make-iptables-util-chains=true` - -**Result:** Pass - -#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) - -**Notes** -This is used by most cloud providers. Not setting this is not practical in most cases. - -**Result:** Not Applicable - -#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' -``` - -**Returned Value:** `--event-qps=0` - -**Result:** Pass - -#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Notes** - -RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). - -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `null` - -**Audit** (`--tls-private-key-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Notes** - -RKE handles certificate rotation through an external process. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' -``` - -**Returned Value:** `null` - -**Result:** Not Applicable - -#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Notes** - -RKE handles certificate rotation through an external process. 
- -**Result:** Not Applicable - -#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -### 2.2 - Configuration Files - -#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - - -#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. 
- -**Result:** Not Applicable - -#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `644` - -**Result:** Pass - -#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable - -#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Not Applicable diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md deleted file mode 100644 index 965c2c20c..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md +++ /dev/null @@ -1,1545 +0,0 @@ ---- -title: Hardening Guide v2.3 -weight: 102 -aliases: - - /rancher/v2.x/en/security/hardening-2.3 ---- -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.0-v2.3.2. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{}}/rancher/v2.x/en/security/benchmark-2.3/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance.
- -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -(See Appendix A. for full ubuntu `cloud-config` example) - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `vm.panic_on_oom = 0` - -``` bash -sysctl vm.panic_on_oom -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -- Verify `kernel.keys.root_maxkeys = 1000000` - -``` bash -sysctl kernel.keys.root_maxkeys -``` - -- Verify `kernel.keys.root_maxbytes = 25000000` - -``` bash -sysctl kernel.keys.root_maxbytes -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxkeys=1000000 -kernel.keys.root_maxbytes=25000000 -``` - -- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. - -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /opt/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. 
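The checks above can be gathered with one short helper before moving on to remediation; this is only a sketch and assumes the `/opt/kubernetes/encryption.yaml` path used throughout this guide:

``` bash
# Report the mode, ownership, and configured providers of the encryption config
# so they can be compared with the expected values listed above.
stat -c 'mode=%a owner=%U:%G' /opt/kubernetes/encryption.yaml
grep -A 1 'providers:' /opt/kubernetes/encryption.yaml
```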
- -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /opt/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/encryption.yaml -chmod 0600 /opt/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. - -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /opt/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/audit.yaml -chmod 0600 /opt/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
- -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /opt/kubernetes/admission.yaml -stat /opt/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/admission.yaml -touch /opt/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/admission.yaml -chown root:root /opt/kubernetes/event.yaml -chmod 0600 /opt/kubernetes/admission.yaml -chmod 0600 /opt/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory has permissions of 700 or more restrictive. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. - -**Audit** - -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %a /var/lib/rancher/etcd -``` - -Verify that the permissions are `700` or more restrictive. - -**Remediation** - -Follow the steps as documented in [1.4.12]({{}}/rancher/v2.x/en/security/hardening-2.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. - -### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory ownership is set to `etcd:etcd`. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. 
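Before running the formal audit steps below, a combined spot check of this control and 1.4.11 can be convenient; treat the following as a sketch and substitute the actual `--data-dir` value found on your etcd nodes:

``` bash
# Show permissions and ownership of the etcd data directory in one pass
# (path shown matches the example used in 1.4.11; adjust as needed).
stat -c 'perms=%a owner=%U:%G' /var/lib/rancher/etcd
```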
- -**Audit** - -On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %U:%G /var/lib/rancher/etcd -``` - -Verify that the ownership is set to `etcd:etcd`. - -**Remediation** - -- On the etcd server node(s) add the `etcd` user: - -``` bash -useradd etcd -``` - -Record the uid/gid: - -``` bash -id etcd -``` - -- Add the following to the RKE `cluster.yml` etcd section under `services`: - -``` yaml -services: - etcd: - uid: - gid: -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix B. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - authorization-mode: "Webhook" - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. 
- -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. - -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---encryption-provider-config=/opt/kubernetes/encryption.yaml ---admission-control-config-file=/opt/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/opt/kubernetes/audit.yaml ---tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.3.x, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
- -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. - -**Audit** - -- In the Rancher UI select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. - ---- - -## Appendix A - Complete ubuntu `cloud-config` Example - -`cloud-config` file to automate hardening manual steps on nodes deployment. 
- -``` -#cloud-config -bootcmd: -- apt-get update -- apt-get install -y apt-transport-https -apt: - sources: - docker: - source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" - keyid: 0EBFCD88 -packages: -- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] -- jq -write_files: -# 1.1.1 - Configure default sysctl settings on all hosts -- path: /etc/sysctl.d/90-kubelet.conf - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxkeys=1000000 - kernel.keys.root_maxbytes=25000000 -# 1.1.2 encription provider -- path: /opt/kubernetes/encryption.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: QRCexFindur3dzS0P/UmHs5xA6sKu58RbtWOQFarfh4= - - identity: {} -# 1.1.3 audit log -- path: /opt/kubernetes/audit.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: audit.k8s.io/v1beta1 - kind: Policy - rules: - - level: Metadata -# 1.1.4 event limit -- path: /opt/kubernetes/admission.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: apiserver.k8s.io/v1alpha1 - kind: AdmissionConfiguration - plugins: - - name: EventRateLimit - path: /opt/kubernetes/event.yaml -- path: /opt/kubernetes/event.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: eventratelimit.admission.k8s.io/v1alpha1 - kind: Configuration - limits: - - type: Server - qps: 5000 - burst: 20000 -# 1.4.12 etcd user -groups: - - etcd -users: - - default - - name: etcd - gecos: Etcd user - primary_group: etcd - homedir: /var/lib/etcd -# 1.4.11 etcd data dir -runcmd: - - chmod 0700 /var/lib/etcd - - usermod -G docker -a ubuntu - - sysctl -p /etc/sysctl.d/90-kubelet.conf -``` - -## Appendix B - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - authorization-mode: "Webhook" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - generate_serving_certificate: true - kube-api: - pod_security_policy: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - 
audit-log-format: "json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - services: - etcd: - uid: 1001 - gid: 1001 -addons: | - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -## Appendix C - Complete RKE Template Example - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false 
-enable_network_policy: false -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - ignore_docker_version: true -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1001 - retention: 72h - snapshot: false - uid: 1001 - kube_api: - always_pull_images: false - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - audit-log-format: json - audit-log-maxage: '5' - audit-log-maxbackup: '5' - audit-log-maxsize: '100' - audit-log-path: /var/log/kube-audit/audit-log.json - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md deleted file mode 100644 index 77c1c408a..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher v2.3.3 -weight: 2 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3.3) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version 
| CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.3.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md deleted file mode 100644 index 385d077c0..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md +++ /dev/null @@ -1,1787 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.3 -weight: 206 -aliases: - - /rancher/v2.x/en/security/benchmark-2.3.3 ---- - -This document is a companion to the Rancher v2.3.3 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Benchmark_Assessment.pdf) - -> The CIS Benchmark version v1.4.1 covers the security posture of Kubernetes 1.13 clusters. This self-assessment has been run against Kubernetes 1.16, using the guidelines outlined in the CIS v1.4.1 benchmark. Updates to the CIS benchmarks will be applied to this document as they are released. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.1. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. 
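To make that pattern concrete before the individual controls, most of the audits below reduce to inspecting a service container and matching an expected argument; a representative example (the same form used for control 1.1.1):

``` bash
# Generic audit pattern: inspect a Kubernetes service container and
# test for an expected command-line flag (exit status is non-zero if it is absent).
docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string'
```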
- -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` command to provide human-readable formatting. - -#### Known Scored Control Failures - -The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. - -- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) -- 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) - -### Controls - ---- - -## 1 - Master Node Security Configuration - -### 1.1 - API Server - -#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Notes** - -Flag not set or `--insecure-bind-address=127.0.0.1`. RKE sets this flag to `--insecure-bind-address=127.0.0.1` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' -``` - -**Returned Value:** `--insecure-port=0` - -**Result:** Pass - -#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' -``` - -**Returned Value:** `--secure-port=6443` - -**Result:** Pass - -#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) - -**Note:** This deprecated flag was removed in 1.14, so it cannot be set. 
- -**Result:** Pass - -#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' -``` - -**Returned Value:** `AlwaysPullImages` - -**Result:** Pass - -#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' -``` - -**Returned Value:** `DenyEscalatingExec` - -**Result:** Pass - -#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) - -**Notes** - -This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: - -> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies - -Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Document - -#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' -``` - -**Returned Value:** `NamespaceLifecycle` - -**Result:** Pass - -#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) - -**Notes** - -This path is the path inside of the container. It's combined with the RKE `cluster.yml` `extra-binds:` option to map the audit log to the host filesystem. - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' -``` - -**Returned Value:** `--audit-log-log=/var/log/kube-audit/audit-log.json` - -**Result:** Pass - -#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxage=30` - -**Result:** Pass - -#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. 
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxbackup=10` - -**Result:** Pass - -#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Notes** - -Audit logs should be collected and shipped off-system to guarantee their integrity. - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' -``` - -**Returned Value:** `--audit-log-maxsize=100` - -**Result:** Pass - -#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' -``` - -**Returned Value:** `--kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Audit** (`--kubelet-client-certificate`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' -``` - -**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--kubelet-client-key`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' -``` - -**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' -``` - -**Returned Value:** `--service-account-lookup=true` - -**Result:** Pass - -#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' -``` - -**Returned Value:** `PodSecurityPolicy` - -**Result:** Pass - -#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' -``` - -**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Audit** (`--etcd-certfile`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' -``` - -**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` - -**Audit** (`--etcd-keyfile`) - -``` bash -docker inspect kube-apiserver | 
jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' -``` - -**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` - -**Result:** Pass - -#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' -``` - -**Returned Value:** `ServiceAccount` - -**Result:** Pass - -#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` - -**Audit** (`--tls-key-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` - -**Result:** Pass - -#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -``` bash -docker inspect kube-apiserver | jq -e 
'.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' -``` - -**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' -``` - -**Returned Value:** `--authorization-mode=Node,RBAC` - -**Result:** Pass - -#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' -``` - -**Returned Value:** `NodeRestriction` - -**Result:** Pass - -#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) - -**Notes** -In Kubernetes 1.16.x this flag is `--encryption-provider-config` - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--encryption-provider-config=.*").string' -``` - -**Returned Value:** `encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml` - -**Result:** Pass - -#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) - -**Notes** - -Only the first provider in the list is active. - -**Audit** - -``` bash -grep -A 1 providers: /etc/kubernetes/ssl/encryption.yaml | grep aescbc -``` - -**Returned Value:** `- aescbc:` - -**Result:** Pass - -#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Notes** - -The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: - -- `/etc/kubernetes/admission.yaml` - -See Host Configuration for details. - -**Audit** (Admissions plugin) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' -``` - -**Returned Value:** `EventRateLimit` - -**Audit** (`--admission-control-config-file`) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' -``` - -**Returned Value:** `--admission-control-config-file=/etc/kubernetes/admission.yaml` - -**Result:** Pass - -#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) - -**Notes** - -`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. - -**Audit** (Feature Gate) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** (Audit Policy File) - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' -``` - -**Returned Value:** `--audit-policy-file=/etc/kubernetes/audit-policy.yaml` - -**Result:** Pass - -#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Notes** - -RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. 
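Leaving the flag unset (the state verified by the audit below) keeps the 60s default. If an environment did need a different value, a hypothetical way to apply it would be an `extra_args` entry for the kube-api service in the RKE `cluster.yml` (shown here only as comments; the `120s` value is illustrative, not a recommendation):

``` bash
# Hypothetical tuning sketch: cluster.yml would carry an extra_args entry under
# the kube-api service, for example:
#   services:
#     kube_api:
#       extra_args:
#         request-timeout: "120s"
# The change is then applied by reconfiguring the cluster:
rke up --config cluster.yml
```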
- -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### Ensure that the --authorization-mode argument includes RBAC (Scored) - -**Audit** - -``` bash -docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' -``` - -**Returned Value:** `"--authorization-mode=Node,RBAC"` - -**Result:** Pass - -### 1.2 - Scheduler - -#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.3 - Controller Manager - -#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' -``` - -**Returned Value:** `--terminated-pod-gc-threshold=1000` - -**Result:** Pass - -#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' -``` - -**Returned Value:** `--profiling=false` - -**Result:** Pass - -#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' -``` - -**Returned Value:** `--use-service-account-credentials=true` - -**Result:** Pass - -#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' -``` - -**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` - -**Result:** Pass - -#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' -``` - -**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) - -**Notes** - -RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. 
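Because the note above is tied to a specific RKE release, it can be useful to confirm which RKE version is managing the cluster. This is an optional check run from the workstation where `rke` is executed, not part of the scored audit:

``` bash
# Optional check (run where the rke binary is used, not on the cluster nodes):
# print the RKE version to compare against the release mentioned in the notes.
rke --version
```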
- -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `RotateKubeletServerCertificate=true` - -**Result:** Pass - -#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -``` bash -docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' -``` - -**Returned Value:** `--address=127.0.0.1` - -**Result:** Pass - -### 1.4 - Configuration Files - -#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -**Note** -This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. 
- -``` bash -stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/* -``` - -**Returned Value:** - -``` bash -/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644 -/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644 -/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644 -/var/lib/cni/networks/k8s-pod-network/lock - 750 -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -stat -c "%n - %a" /etc/cni/net.d/* -``` - -**Returned Value:** - -``` bash -/etc/cni/net.d/10-canal.conflist - 644 -/etc/cni/net.d/calico-kubeconfig - 600 -``` - -**Result:** Pass - -#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) - -**Notes** - -This is a manual check. - -**Audit** (`/var/lib/cni/networks/k8s-pod-network`) - -``` bash -stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/* -``` - -**Returned Value:** - -``` bash -/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root -/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root -/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root -/var/lib/cni/networks/k8s-pod-network/lock - root:root -``` - -**Audit** (`/etc/cni/net.d`) - -``` bash -stat -c "%n - %U:%G" /etc/cni/net.d/* -``` - -**Returned Value:** - -``` bash -/etc/cni/net.d/10-canal.conflist - root:root -/etc/cni/net.d/calico-kubeconfig - root:root -``` - -**Result:** Pass - -#### 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Notes** - -Files underneath the data dir have permissions set to `700` - -``` bash -stat -c "%n - %a" /var/lib/etcd/* - -/var/lib/etcd/member - 700 -``` - -**Audit** - -``` bash -stat -c %a /var/lib/etcd -``` - -**Returned Value:** `700` - -**Result:** Pass - -#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) - -**Notes** - -The `etcd` container runs as the `etcd` user. The data directory and files are owned by `etcd`. - -**Audit** - -``` bash -stat -c %U:%G /var/lib/etcd -``` - -**Returned Value:** `etcd:etcd` - -**Result:** Pass - -#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It's presented to user where RKE is run. We recommend that this kube_config_cluster.yml file be kept in secure store. - -**Result:** Pass (Not Applicable) - -#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) - -**Notes** - -RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in secure store. 
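A minimal sketch of that recommendation, assuming the default `kube_config_cluster.yml` filename in the directory where `rke` was run, is to restrict the generated kubeconfig to its owner before moving it into secure storage:

``` bash
# Sketch only: lock the generated kubeconfig down to the owning user on the
# workstation where rke was run (filename assumes the RKE default).
chmod 600 kube_config_cluster.yml
ls -l kube_config_cluster.yml
```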
- -**Result:** Pass (Not Applicable) - -#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `640` - -**Result:** Pass - -#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `640` - -**Result:** Pass - -#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) - -**Audit** - -``` bash -ls -laR /etc/kubernetes/ssl/ |grep -v yaml -``` - -**Returned Value:** -``` bash -total 128 -drwxr-xr-x 2 root root 4096 Jul 1 19:53 . -drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. --rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem --rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem --rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem --rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem --rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem --rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem --rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem --rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem --rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem --rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem --rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem --rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem -``` - -**Result:** Pass - -#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 640 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 640 -/etc/kubernetes/ssl/kube-apiserver.pem - 640 -/etc/kubernetes/ssl/kube-ca.pem - 640 -/etc/kubernetes/ssl/kube-controller-manager.pem - 640 
-/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 640 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 640 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 640 -/etc/kubernetes/ssl/kube-node.pem - 640 -/etc/kubernetes/ssl/kube-proxy.pem - 640 -/etc/kubernetes/ssl/kube-scheduler.pem - 640 -/etc/kubernetes/ssl/kube-service-account-token.pem - 640 -``` - -**Result:** Pass - -#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) - -**Audit** - -``` bash -stat -c "%n - %a" /etc/kubernetes/ssl/*key* - -``` - -**Returned Value:** -``` bash -/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 -/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-ca-key.pem - 600 -/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 -/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 -/etc/kubernetes/ssl/kube-node-key.pem - 600 -/etc/kubernetes/ssl/kube-proxy-key.pem - 600 -/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 -/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 -``` - -**Result:** Pass - -### 1.5 - etcd - -#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Audit** `(--cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` - -**Audit** (`--key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` - -**Result:** Pass - -#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--client-cert-auth` - -**Result:** Pass - -#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Audit** (`--peer-cert-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' -``` - -**Note** -Certificate file name may vary slightly, since it contains the IP of the etcd container. - -**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` - -**Audit** (`--peer-key-file`) - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' -``` - -**Note** -Key file name may vary slightly, since it contains the IP of the etcd container. 
- -**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` - -**Result:** Pass - -#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Notes** - -Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' -``` - -**Returned Value:** `--peer-client-cert-auth` - -**Result:** Pass - -#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) - -**Mitigation** - -RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own distinct CA. - -**Notes** - -`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. - -**Audit** - -``` bash -docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' -``` - -**Returned Value:** `null` - -**Result:** Pass (See Mitigation) - -#### 1.6 - General Security Primitives - -These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone. - -#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored) - - -Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. - -Rancher can integrate with external authentication sources (LDAP, SAML, AD…), which allows your existing users or groups easy access with unique credentials. - -#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) - -With Rancher, users or groups can be assigned access to all clusters, a single cluster, or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. - -#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. - -See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. - -#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) - -Since this requires enabling the AllAlpha feature gate, we do not recommend enabling this feature at the moment. - -#### 1.6.5 - Apply security context to your pods and containers (Not Scored) - -This practice does go against control 1.1.13, but we prefer using a PodSecurityPolicy and allowing security context to be set over a blanket deny. - -Rancher allows users to set various Security Context options when launching pods via the UI. - -#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) - -Image Policy Webhook requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host configuration section for the admission.yaml file.
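A hypothetical way to check whether such a webhook has been wired in is to look for an `ImagePolicyWebhook` entry in the admission configuration file referenced by `--admission-control-config-file` (the `/etc/kubernetes/admission.yaml` path is taken from the audit in 1.1.36):

``` bash
# Hypothetical check: report whether an ImagePolicyWebhook configuration block
# exists in the admission control config file used by kube-apiserver.
grep -A 5 "ImagePolicyWebhook" /etc/kubernetes/admission.yaml || echo "ImagePolicyWebhook not configured"
```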
- -#### 1.6.7 - Configure network policies as appropriate (Not Scored) - -Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. - -See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. - -#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) - -Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. - -With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. - - -#### 1.7 - Pod Security Policies (PSP) - -This RKE configuration has two Pod Security Policies. - -- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. -- `restricted-psp`: This is the cluster default PSP and follows the best practices defined by controls in this section. - -#### 1.7.1 - Do not admit privileged containers (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.privileged}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.hostPID}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.hostIPC}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.hostNetwork}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. - -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" -``` - -**Returned Value:** `RunAsAny` - -**Result:** Pass - -#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Notes** - -The restricted PodSecurityPolicy is available to all ServiceAccounts. 
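An optional way to confirm that statement is to inspect the cluster-wide role and binding that expose the restricted PSP to all service accounts (object names as used elsewhere in this guide):

``` bash
# Optional check: the cluster role should reference restricted-psp in its
# resourceNames, and the binding should grant it to all service accounts.
kubectl get clusterrole restricted-clusterrole -o yaml | grep -A 2 "resourceNames"
kubectl get clusterrolebinding restricted-clusterrolebinding -o wide
```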
- -**Audit** - -``` bash -kubectl get psp restricted-psp -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" -``` - -**Returned Value:** `null` - -**Result:** Pass - -## 2 - Worker Node Security Configuration - -### 2.1 - Kubelet - -#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' -``` - -**Returned Value:** `--anonymous-auth=false` - -**Result:** Pass - -#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' -``` - -**Returned Value:** `--authorization-mode=Webhook` - -**Result:** Pass - -#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' -``` - -**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` - -**Result:** Pass - -#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' -``` - -**Returned Value:** `--read-only-port=0` - -**Result:** Pass - -#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' -``` - -**Returned Value:** `--streaming-connection-idle-timeout=30m` - -**Result:** Pass - -#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' -``` - -**Returned Value:** `--protect-kernel-defaults=true` - -**Result:** Pass - -#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' -``` - -**Returned Value:** `--make-iptables-util-chains=true` - -**Result:** Pass - -#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) - -**Notes** -This is used by most cloud providers. Not setting this is not practical in most cases. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' -``` - -**Returned Value:** `--hostname-override=` - -**Result:** Fail - -#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' -``` - -**Returned Value:** `--event-qps=0` - -**Result:** Pass - -#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Notes** - -RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
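As an optional sanity check (not part of the scored audit), the kubelet's self-generated serving certificate and key can be listed in the default PKI directory mentioned above:

``` bash
# Optional check: list the kubelet's self-generated serving certificate and key
# in the default directory referenced in the notes above.
ls -l /var/lib/kubelet/pki
```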
- -**Audit** (`--tls-cert-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' -``` - -**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-40-84.pem` - -**Audit** (`--tls-private-key-file`) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' -``` - -**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-40-84-key.pem` - -**Result:** Pass - -#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Notes** - -RKE handles certificate rotation through an external process. - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' -``` - -**Returned Value:** `null` - -**Result:** Pass (Not Applicable) - -#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' -``` - -**Returned Value:** `RotateKubeletServerCertificate=true` - -**Result:** Pass - -#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) - -**Audit** (Allowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' -``` - -**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` - -**Audit** (Disallowed Ciphers) - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(CBC).*").captures[].string' -``` - -**Returned Value:** `null` - -**Audit** - -``` bash -docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' -``` - -**Returned Value:** `null` - -**Result:** Pass - -### 2.2 - Configuration Files - -#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `640` - -**Result:** Pass - -#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) - -**Notes** - -This is the value of the `--kubeconfig` option. - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - - -#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `640` - -**Result:** Pass - -#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Audit** - -``` bash -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `640` - -**Result:** Pass - -#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Audit** - -``` bash -stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem -``` - -**Returned Value:** `root:root` - -**Result:** Pass - -#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. - -**Result:** Pass (Not Applicable) - -#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) - -**Notes** - -RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. 
- -**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md deleted file mode 100644 index da5c65077..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md +++ /dev/null @@ -1,2044 +0,0 @@ ---- -title: Hardening Guide v2.3.3 -weight: 101 -aliases: - - /rancher/v2.x/en/security/hardening-2.3.3 ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.3. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{}}/rancher/v2.x/en/security/benchmark-2.3.3/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher RKE Kubernetes cluster host configuration - -(See Appendix A for a full Ubuntu `cloud-config` example) - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually.
- -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `vm.panic_on_oom = 0` - -``` bash -sysctl vm.panic_on_oom -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -- Verify `kernel.keys.root_maxkeys = 1000000` - -``` bash -sysctl kernel.keys.root_maxkeys -``` - -- Verify `kernel.keys.root_maxbytes = 25000000` - -``` bash -sysctl kernel.keys.root_maxbytes -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxkeys=1000000 -kernel.keys.root_maxbytes=25000000 -``` - -- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory has permissions of 700 or more restrictive. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. - -**Audit** - -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %a /var/lib/etcd -``` - -Verify that the permissions are `700` or more restrictive. - -**Remediation** - -Follow the steps as documented in [1.4.12]({{}}/rancher/v2.x/en/security/hardening-2.3.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. - -### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory ownership is set to `etcd:etcd`. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. - -**Audit** - -On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %U:%G /var/lib/etcd -``` - -Verify that the ownership is set to `etcd:etcd`. - -**Remediation** - -- On the etcd server node(s) add the `etcd` user: - -``` bash -useradd -c "Etcd user" -d /var/lib/etcd etcd -``` - -Record the uid/gid: - -``` bash -id etcd -``` - -- Add the following to the RKE `cluster.yml` etcd section under `services`: - -``` yaml -services: - etcd: - uid: - gid: -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix B. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. 
- -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy ---encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=30 ---audit-log-maxbackup=10 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit-policy.yaml ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -For k8s 1.14 `enable-admission-plugins` should be - -``` yaml - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole restricted-clusterrole -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding restricted-clusterrolebinding -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted-psp -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted-psp - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: restricted-clusterrole - rules: - - apiGroups: - - extensions - resourceNames: - - restricted-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: restricted-clusterrolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: restricted-clusterrole - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.3.3 or above, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local admin password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local admin password should be changed from the default. - -**Rationale** - -The default admin password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
- -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. - -**Audit** - -- In the Rancher UI select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. - -## 4.1 - Rancher Kubernetes Custom Cluster Configuration via RKE - -(See Appendix C. for full RKE template example) - -### 4.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
- -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 4.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy ---encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=30 ---audit-log-maxbackup=10 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit-policy.yaml ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -For k8s 1.14 `enable-admission-plugins` should be - -``` yaml - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 4.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 4.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 4.1.5 - Check PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole restricted-clusterrole -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -``` - -- Verify the restricted PSP is present. - -``` bash -kubectl get psp restricted-psp -``` - ---- - -## Appendix A - Complete ubuntu `cloud-config` Example - -`cloud-config` file to automate hardening manual steps on nodes deployment. 
- -``` -#cloud-config -bootcmd: -- apt-get update -- apt-get install -y apt-transport-https -apt: - sources: - docker: - source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" - keyid: 0EBFCD88 -packages: -- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] -- jq -write_files: -# 1.1.1 - Configure default sysctl settings on all hosts -- path: /etc/sysctl.d/90-kubelet.conf - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxkeys=1000000 - kernel.keys.root_maxbytes=25000000 -# 1.4.12 etcd user -groups: - - etcd -users: - - default - - name: etcd - gecos: Etcd user - primary_group: etcd - homedir: /var/lib/etcd -# 1.4.11 etcd data dir -runcmd: - - chmod 0700 /var/lib/etcd - - usermod -G docker -a ubuntu - - sysctl -p /etc/sysctl.d/90-kubelet.conf -``` - -## Appendix B - Complete RKE `cluster.yml` Example - -Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. - -{{% accordion id="cluster-1.14" label="RKE yaml for k8s 1.14" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -authorization: {} -bastion_host: - ssh_agent_auth: false -cloud_provider: {} -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.14.9-rancher1-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -restore: - restore: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube-api: - always_pull_images: true - audit_log: - enabled: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube-controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - protect-kernel-defaults: 'true' - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: {} - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' -ssh_agent_auth: false -``` - -{{% /accordion %}} - -{{% accordion id="cluster-1.15" label="RKE yaml for k8s 1.15" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.15.6-rancher1-2 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -ssh_agent_auth: false -``` - -{{% /accordion %}} - -{{% accordion id="cluster-1.16" label="RKE yaml for k8s 1.16" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.16.3-rancher1-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -ssh_agent_auth: false -``` - -{{% /accordion %}} - -## Appendix C - Complete RKE Template Example - -Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. 
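- -If the `etcd` user and group were created as in the Appendix A `cloud-config`, the numeric ids to substitute can be read directly on an etcd node. The commands below are an illustrative sketch only; they assume the user and group are both named `etcd`: - -``` bash -# Print the numeric uid and gid to use for services.etcd.uid and services.etcd.gid -id -u etcd -id -g etcd -``` -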
- - -{{% accordion id="k8s-1.14" label="RKE template for k8s 1.14" %}} - -``` yaml -# -# Cluster Config -# -answers: {} -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: false -name: test-35378 -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - authorization: {} - bastion_host: - ssh_agent_auth: false - cloud_provider: {} - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.14.9-rancher1-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal - restore: - restore: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube-api: - always_pull_images: true - audit_log: - enabled: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube-controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - protect-kernel-defaults: 'true' - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: {} - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} - -{{% accordion id="k8s-1.15" label="RKE template for k8s 1.15" %}} - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker 
-enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.6-rancher1-2 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} - -{{% accordion id="k8s-1.16" label="RKE template for k8s 1.16" %}} - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false 
-enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.16.3-rancher1-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md deleted file mode 100644 index d6bbefc79..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- 
-title: Rancher v2.3.5 -weight: 1 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3.5) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 - -### Hardening Guide - -This hardening [guide](./hardening-2.3.5) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15 \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md deleted file mode 100644 index 6d0734a8b..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md +++ /dev/null @@ -1,2268 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 -weight: 205 -aliases: - - /rancher/v2.x/en/security/benchmark-2.3.5 ---- - -### CIS Kubernetes Benchmark v1.5 - Rancher v2.3.5 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.3.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. 
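- -As an illustrative sketch (the container name, jq path, and flag shown here are examples only, not part of the prescribed audits), a single argument can be spot-checked with the same docker and jq tooling the audits rely on: - -``` bash -# Example only: list the arguments the kube-apiserver container was started with -docker inspect kube-apiserver | jq -r '.[0].Args[]' -# Example only: or read the full command line from the process table, as later audits do -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` -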
-When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- -## 1 Master Node Security Configuration -### 1.1 Master Node Configuration Files - -#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). 
For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. -`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
- -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. 
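-
-In RKE-provisioned clusters this is typically handled through the cluster configuration rather than by editing a static manifest; the reference hardened `cluster.yml` later in this guide enables the plugin as follows:
-
-``` yaml
-services:
-  kube-api:
-    pod_security_policy: true
-```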
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. -For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the client certificate authority file. - -``` bash ---client-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the etcd certificate authority file parameter. - -``` bash ---etcd-cafile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-cafile' is present -``` - -#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. 
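-
-For reference, a minimal `EncryptionConfiguration` using the `aescbc` provider could look like the sketch below; the key name and secret are placeholders, and on RKE nodes the audit for check 1.2.34 expects the file at `/etc/kubernetes/ssl/encryption.yaml`. When `secrets_encryption_config` is enabled in `cluster.yml` (as in the reference configuration later in this guide), RKE manages this file for you.
-
-``` yaml
-apiVersion: apiserver.config.k8s.io/v1
-kind: EncryptionConfiguration
-resources:
-  - resources:
-      - secrets
-    providers:
-      - aescbc:
-          keys:
-            - name: key1
-              secret: <base64-encoded 32-byte key>
-      - identity: {}
-```
-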
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--encryption-provider-config` parameter to the path of that file: - -``` bash ---encryption-provider-config= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--encryption-provider-config' is present -``` - -#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a `EncryptionConfig` file. -In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. - -**Audit Script:** 1.2.34.sh - -``` -#!/bin/bash -e - -check_file=${1} - -grep -q -E 'aescbc|kms|secretbox' ${check_file} -if [ $? -eq 0 ]; then - echo "--pass" - exit 0 -else - echo "fail: encryption provider found in ${check_file}" - exit 1 -fi -``` - -**Audit Execution:** - -``` -./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 1.3 Controller Manager - -#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, -for example: - -``` bash ---terminated-pod-gc-threshold=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--terminated-pod-gc-threshold' is present -``` - -#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. 
- -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. 
- -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. - -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the file permissions of the - -``` bash ---client-ca-file chmod 644 -``` - -**Audit:** - -``` -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Expected result**: - -``` -'644' is equal to '644' OR '640' is present OR '600' is present -``` - -#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the ownership of the `--client-ca-file`. 
- -``` bash -chown root:root -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -### 4.2 Kubelet - -#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to -`false`. -If using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---anonymous-auth=false -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If -using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---authorization-mode=Webhook -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
-If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
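-
-For RKE clusters the intent of this check is typically covered by having the kubelet generate its own serving certificate, which the reference hardened `cluster.yml` later in this guide enables with:
-
-``` yaml
-services:
-  kubelet:
-    generate_serving_certificate: true
-```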
- -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. 
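-
-For illustration only, a policy satisfying this check might look like the sketch below (the policy name here is arbitrary; the `restricted` policy in the reference `cluster.yml` later in this guide achieves the same result by omitting `.spec.hostPID` altogether):
-
-``` yaml
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: no-host-pid
-spec:
-  hostPID: false          # explicit; omitting the field has the same effect
-  privileged: false
-  allowPrivilegeEscalation: false
-  runAsUser:
-    rule: MustRunAsNonRoot
-  seLinux:
-    rule: RunAsAny
-  supplementalGroups:
-    rule: RunAsAny
-  fsGroup:
-    rule: RunAsAny
-  volumes:
-    - configMap
-    - emptyDir
-    - secret
-    - persistentVolumeClaim
-    - downwardAPI
-    - projected
-```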
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? 
-gt 0 ]]; then
-  echo "fail: kubectl failed"
-  exit 1
-fi
-
-default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l)
-
-echo "--count=${default_resources}"
-```
-
-**Audit Execution:**
-
-```
-./5.6.4.sh
-```
-
-**Expected result**:
-
-```
-'0' is equal to '0'
-```
-
diff --git a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md
deleted file mode 100644
index 723a700a6..000000000
--- a/content/rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md
+++ /dev/null
@@ -1,715 +0,0 @@
----
-title: Hardening Guide v2.3.5
-weight: 100
-aliases:
-  - /rancher/v2.x/en/security/hardening-2.3.5
----
-
-This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
-
-> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes.
-
-This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
-
-Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version
-------------------------|----------------|-----------------------|------------------
-Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15
-
-
-[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Hardening_Guide.pdf)
-
-### Overview
-
-This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
-
-For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.5]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3.5/).
-
-#### Known Issues
-
-- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
-- When setting `default_pod_security_policy_template_id:` to `restricted`, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 check 5.1.5 requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments.
-
-### Configure Kernel Runtime Parameters
-
-The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
-
-```
-vm.overcommit_memory=1
-vm.panic_on_oom=0
-kernel.panic=10
-kernel.panic_on_oops=1
-kernel.keys.root_maxbytes=25000000
-```
-
-Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
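-
-To verify that the parameters are active on a node, they can be queried back, for example:
-
-``` bash
-sysctl vm.overcommit_memory vm.panic_on_oom kernel.panic kernel.panic_on_oops kernel.keys.root_maxbytes
-```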
-
-### Configure `etcd` user and group
-A user account and group for the **etcd** service is required to be set up before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time.
-
-#### Create `etcd` user and group
-To create the **etcd** group, run the following console commands.
-
-```
-groupadd --gid 52034 etcd
-useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
-```
-
-Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
-
-``` yaml
-services:
-  etcd:
-    gid: 52034
-    uid: 52034
-```
-
-#### Set `automountServiceAccountToken` to `false` for `default` service accounts
-Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
-
-For each namespace, the **default** service account must include this value:
-
-```
-automountServiceAccountToken: false
-```
-
-Save the following `yaml` to a file called `account_update.yaml`:
-
-``` yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: default
-automountServiceAccountToken: false
-```
-
-Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions.
-
-```
-#!/bin/bash -e
-
-for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
-  kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
-done
-```
-
-### Ensure that all Namespaces have Network Policies defined
-
-Running different applications on the same Kubernetes cluster creates a risk of one
-compromised application attacking a neighboring application. Network segmentation is
-important to ensure that containers can communicate only with those they are supposed
-to. A network policy is a specification of how selections of pods are allowed to
-communicate with each other and other network endpoints.
-
-Network Policies are namespace scoped. When a network policy is introduced to a given
-namespace, all traffic not allowed by the policy is denied. However, if there are no network
-policies in a namespace, all traffic will be allowed into and out of the pods in that
-namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled.
-This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement.
-Additional information about CNI providers can be found
-[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
-
-Once a CNI provider is enabled on a cluster, a default network policy can be applied. For reference purposes, a
-**permissive** example is provided below. If you want to allow all traffic to all pods in a namespace
-(even if policies are added that cause some pods to be treated as “isolated”),
-you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as
-`default-allow-all.yaml`.
Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. - -### Reference Hardened RKE `cluster.yml` configuration -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system 
- --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. 
-RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.x/en/installation) for additional installation and RKE Template details. - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using 
calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
- -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.4/_index.md b/content/rancher/v2.x/en/security/rancher-2.4/_index.md deleted file mode 100644 index 67cda4137..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.4/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher v2.4 -weight: 2 ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.4) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 - -### Hardening Guide - -This hardening [guide](./hardening-2.4) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 diff --git a/content/rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/_index.md b/content/rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/_index.md deleted file mode 100644 index 2f6baa620..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/_index.md +++ /dev/null @@ -1,2268 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.4 -weight: 204 -aliases: - - /rancher/v2.x/en/security/benchmark-2.4 ---- - -### CIS Kubernetes Benchmark v1.5 - Rancher v2.4 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. 
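Because these services run as Docker containers rather than from static pod manifests, the flags currently in effect can also be read directly from the running containers. The snippet below is only an illustrative sketch; it assumes the container names RKE assigns (for example `kube-apiserver` and `etcd`), which the audit commands later in this guide also rely on.

``` bash
# Illustrative only: list the arguments the RKE-managed API server container
# was started with. The same approach works for the etcd, kube-controller-manager,
# kube-scheduler, kubelet and kube-proxy containers.
docker inspect kube-apiserver --format '{{ .Path }} {{ join .Args " " }}'

# Equivalent check using jq, which the audit scripts in this guide already use:
docker inspect kube-apiserver | jq -r '.[0].Args[]'
```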
- -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with a valid config) tools, which are required for testing and evaluating the results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- -## 1 Master Node Security Configuration -### 1.1 Master Node Configuration Files - -#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. 
- -#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. 
- -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. -`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
- -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. -For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the client certificate authority file. - -``` bash ---client-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the etcd certificate authority file parameter. - -``` bash ---etcd-cafile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-cafile' is present -``` - -#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. 
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--encryption-provider-config` parameter to the path of that file: - -``` bash ---encryption-provider-config= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--encryption-provider-config' is present -``` - -#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a `EncryptionConfig` file. -In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. - -**Audit Script:** 1.2.34.sh - -``` -#!/bin/bash -e - -check_file=${1} - -grep -q -E 'aescbc|kms|secretbox' ${check_file} -if [ $? -eq 0 ]; then - echo "--pass" - exit 0 -else - echo "fail: encryption provider found in ${check_file}" - exit 1 -fi -``` - -**Audit Execution:** - -``` -./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 1.3 Controller Manager - -#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, -for example: - -``` bash ---terminated-pod-gc-threshold=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--terminated-pod-gc-threshold' is present -``` - -#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. 
- -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. 
- -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. - -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the file permissions of the `--client-ca-file`. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kube-ca.pem -``` - -**Audit:** - -``` -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Expected result**: - -``` -'644' is equal to '644' OR '640' is present OR '600' is present -``` - -#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the ownership of the `--client-ca-file`. 
- -``` bash -chown root:root -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -### 4.2 Kubelet - -#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to -`false`. -If using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---anonymous-auth=false -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If -using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---authorization-mode=Webhook -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
-If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
- -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. 
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? 
-gt 0 ]]; then - echo "fail: kubectl failed" - exit 1 -fi - -default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) - -echo "--count=${default_resources}" -``` - -**Audit Execution:** - -``` -./5.6.4.sh -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - diff --git a/content/rancher/v2.x/en/security/rancher-2.4/hardening-2.4/_index.md b/content/rancher/v2.x/en/security/rancher-2.4/hardening-2.4/_index.md deleted file mode 100644 index 89da49bcb..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.4/hardening-2.4/_index.md +++ /dev/null @@ -1,722 +0,0 @@ ---- -title: Hardening Guide v2.4 -weight: 99 -aliases: - - /rancher/v2.x/en/security/hardening-2.4 ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 - - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Hardening_Guide.pdf) - -### Overview - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.4 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.4]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.4/). - -#### Known Issues - -- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. -- When setting the `default_pod_security_policy_template_id:` to `restricted`, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured such that they do not provide a service account token and do not have any explicit rights assignments. - -### Configure Kernel Runtime Parameters - -The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: - -``` -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxbytes=25000000 -``` - -Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### Configure `etcd` user and group -A user account and group for the **etcd** service is required to be set up before installing RKE.
The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. - -#### create `etcd` user and group -To create the **etcd** group run the following console commands. - -The commands below use `52034` for **uid** and **gid** are for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`. - -``` -groupadd --gid 52034 etcd -useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` - -Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: - -``` yaml -services: - etcd: - gid: 52034 - uid: 52034 -``` - -#### Set `automountServiceAccountToken` to `false` for `default` service accounts -Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -For each namespace including **default** and **kube-system** on a standard RKE install the **default** service account must include this value: - -``` -automountServiceAccountToken: false -``` - -Save the following yaml to a file called `account_update.yaml` - -``` yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default -automountServiceAccountToken: false -``` - -Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" -done -``` - -### Ensure that all Namespaces have Network Policies defined - -Running different applications on the same Kubernetes cluster creates a risk of one -compromised application attacking a neighboring application. Network segmentation is -important to ensure that containers can communicate only with those they are supposed -to. A network policy is a specification of how selections of pods are allowed to -communicate with each other and other network endpoints. - -Network Policies are namespace scoped. When a network policy is introduced to a given -namespace, all traffic not allowed by the policy is denied. However, if there are no network -policies in a namespace all traffic will be allowed into and out of the pods in that -namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. -This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. -Additional information about CNI providers can be found -[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) - -Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a -**permissive** example is provide below. If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. 
Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. - -### Reference Hardened RKE `cluster.yml` configuration -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes - - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: 
rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" 
-addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.x/en/installation) for additional installation and RKE Template details. - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: 
ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
- -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md deleted file mode 100644 index 463446b78..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md +++ /dev/null @@ -1,2265 +0,0 @@ ---- -title: CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5 -weight: 201 ---- - -### CIS v1.5 Kubernetes Benchmark - Rancher v2.5 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.5_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS 1.5 Benchmark | Rancher v2.5 | CIS v1.5 | Kubernetes v1.15 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with a valid config) tools, which are required for testing and evaluating the results. - -> NOTE: only scored tests are covered in this guide.
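
Before running the audits in the sections that follow, it can help to confirm that these prerequisites are in place on the node being tested. The sketch below is illustrative only; the kubeconfig path matches the default used by the audit scripts in this guide.

``` bash
# Confirm the tooling and access the audit commands below depend on
command -v jq >/dev/null || { echo "jq is not installed"; exit 1; }
command -v kubectl >/dev/null || { echo "kubectl is not installed"; exit 1; }
# The RKE system containers should be visible to the Docker CLI on this node (names vary by node role)
docker ps --format '{{.Names}}' | grep -E 'kubelet|kube-apiserver|etcd' || echo "warning: no RKE system containers found on this node"
# kubectl must be able to reach the cluster with a valid kubeconfig
kubectl --kubeconfig=${KUBECONFIG:-/root/.kube/config} version > /dev/null && echo "kubectl OK"
```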
- -### Controls - ---- -## 1 Master Node Security Configuration -### 1.1 Master Node Configuration Files - -#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). 
For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. -`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
- -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. 
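
Note that in an RKE-provisioned cluster there is no static pod manifest to edit; as described in the testing methodology above, configuration is passed to the kube-apiserver container as arguments. A hardened RKE `cluster.yml` typically enables this plugin by setting `pod_security_policy: true` under `services.kube-api`, and the change is applied by re-running RKE. This is a sketch, assuming the cluster is managed with the RKE CLI and a local `cluster.yml`:

``` bash
# Apply the updated cluster.yml; RKE reconfigures and restarts the kube-apiserver container with the new settings
rke up --config cluster.yml
```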
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. -For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the client certificate authority file. - -``` bash ---client-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the etcd certificate authority file parameter. - -``` bash ---etcd-cafile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-cafile' is present -``` - -#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. 
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--encryption-provider-config` parameter to the path of that file: - -``` bash ---encryption-provider-config= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--encryption-provider-config' is present -``` - -#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a `EncryptionConfig` file. -In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. - -**Audit Script:** 1.2.34.sh - -``` -#!/bin/bash -e - -check_file=${1} - -grep -q -E 'aescbc|kms|secretbox' ${check_file} -if [ $? -eq 0 ]; then - echo "--pass" - exit 0 -else - echo "fail: encryption provider found in ${check_file}" - exit 1 -fi -``` - -**Audit Execution:** - -``` -./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 1.3 Controller Manager - -#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, -for example: - -``` bash ---terminated-pod-gc-threshold=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--terminated-pod-gc-threshold' is present -``` - -#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. 
- -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. 
- -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. - -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the file permissions of the - -``` bash ---client-ca-file chmod 644 -``` - -**Audit:** - -``` -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Expected result**: - -``` -'644' is equal to '644' OR '640' is present OR '600' is present -``` - -#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the ownership of the `--client-ca-file`. 
- -``` bash -chown root:root -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -### 4.2 Kubelet - -#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to -`false`. -If using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---anonymous-auth=false -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If -using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---authorization-mode=Webhook -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
-If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
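-
-Because RKE passes all kubelet configuration as container arguments, the TLS flags covered by this control can still be reviewed directly against the running container. The command below is an illustrative sketch only; it assumes the default RKE container name `kubelet` and that `jq` is available, as elsewhere in this guide.
-
-``` bash
-# Print the arguments the RKE-managed kubelet container was started with
-# and filter for the serving certificate flags.
-docker inspect --format '{{ json .Args }}' kubelet | jq -r '.[]' | grep -E 'tls-cert-file|tls-private-key-file'
-```
-
-The same pattern can be used to spot-check any of the other kubelet flags audited in section 4.2.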
- -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. 
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? 
-gt 0 ]]; then
-  echo "fail: kubectl failed"
-  exit 1
-fi
-
-default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l)
-
-echo "--count=${default_resources}"
-```
-
-**Audit Execution:**
-
-```
-./5.6.4.sh
-```
-
-**Expected result**:
-
-```
-'0' is equal to '0'
-```
diff --git a/content/rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/_index.md
deleted file mode 100644
index 8a81b7510..000000000
--- a/content/rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/_index.md
+++ /dev/null
@@ -1,720 +0,0 @@
----
-title: Hardening Guide with CIS 1.5 Benchmark
-weight: 200
----
-
-This document provides prescriptive guidance for hardening a production installation of an RKE cluster to be used with Rancher v2.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
-
-> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes.
-
-This hardening guide is intended to be used for RKE clusters and is associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
-
- Rancher Version | CIS Benchmark Version | Kubernetes Version
-----------------|-----------------------|------------------
- Rancher v2.5 | Benchmark v1.5 | Kubernetes 1.15
-
-[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.6.pdf)
-
-### Overview
-
-This document provides prescriptive guidance for hardening an RKE cluster to be used for installing Rancher v2.5 with Kubernetes v1.15, or for provisioning an RKE cluster with Kubernetes 1.15 to be used within Rancher v2.5. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
-
-For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5]({{< baseurl >}}/rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/).
-
-#### Known Issues
-
-- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
-- When setting the `default_pod_security_policy_template_id:` to `restricted`, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments.
-
-### Configure Kernel Runtime Parameters
-
-The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
-
-```
-vm.overcommit_memory=1
-vm.panic_on_oom=0
-kernel.panic=10
-kernel.panic_on_oops=1
-kernel.keys.root_maxbytes=25000000
-```
-
-Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
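-
-To confirm that the settings are active on a node, the same parameters can simply be read back with `sysctl`. This is only a quick verification sketch and assumes nothing beyond the file shown above:
-
-```
-sysctl vm.overcommit_memory vm.panic_on_oom kernel.panic kernel.panic_on_oops kernel.keys.root_maxbytes
-```
-
-Each parameter should be reported with the value configured in `/etc/sysctl.d/90-kubelet.conf`.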
-
-### Configure `etcd` user and group
-A user account and group for the **etcd** service must be set up before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation.
-
-#### Create the `etcd` user and group
-To create the **etcd** user and group, run the following console commands.
-
-The commands below use `52034` for the **uid** and **gid** as an example. Any valid unused **uid** or **gid** could also be used in lieu of `52034`.
-
-```
-groupadd --gid 52034 etcd
-useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
-```
-
-Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
-
-``` yaml
-services:
-  etcd:
-    gid: 52034
-    uid: 52034
-```
-
-#### Set `automountServiceAccountToken` to `false` for `default` service accounts
-Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
-
-For each namespace, including **default** and **kube-system**, on a standard RKE install, the **default** service account must include this value:
-
-```
-automountServiceAccountToken: false
-```
-
-Save the following yaml to a file called `account_update.yaml`:
-
-``` yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: default
-automountServiceAccountToken: false
-```
-
-Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions.
-
-```
-#!/bin/bash -e
-
-for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
-  kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
-done
-```
-
-### Ensure that all Namespaces have Network Policies defined
-
-Running different applications on the same Kubernetes cluster creates a risk of one
-compromised application attacking a neighboring application. Network segmentation is
-important to ensure that containers can communicate only with those they are supposed
-to. A network policy is a specification of how selections of pods are allowed to
-communicate with each other and other network endpoints.
-
-Network Policies are namespace scoped. When a network policy is introduced to a given
-namespace, all traffic not allowed by the policy is denied. However, if there are no network
-policies in a namespace, all traffic will be allowed into and out of the pods in that
-namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled.
-This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement.
-Additional information about CNI providers can be found
-[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
-
-Once a CNI provider is enabled on a cluster, a default network policy can be applied. For reference purposes, a
-**permissive** example is provided below.
If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. - -### Reference Hardened RKE `cluster.yml` configuration - -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes - - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. 
-kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: 
- name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.x/en/installation) for additional installation and RKE Template details. 
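-
-For orientation: the `cluster.yml` in the previous section is applied directly with the RKE CLI, while the template below is typically added through the Rancher UI as an RKE Template revision. A minimal CLI sketch, assuming the hardened file is saved as `cluster.yml` in the current working directory:
-
-```
-# Provision (or reconcile) the hardened cluster described by ./cluster.yml
-rke up --config ./cluster.yml
-```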
- -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To 
specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. - -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md deleted file mode 100644 index d7803779e..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md +++ /dev/null @@ -1,3317 +0,0 @@ ---- -title: CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4 -weight: 101 ---- - -### CIS 1.6 Kubernetes Benchmark - Rancher v2.5.4 with Kubernetes v1.18 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.6_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.5.4 security hardening guide. 
The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS 1.6 Benchmark | Rancher v2.5.4 | CIS 1.6| Kubernetes v1.18 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark 1.6. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -### Controls - -## 1.1 Etcd Node Configuration Files -### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, -chmod 700 /var/lib/etcd - - -**Audit:** - -```bash -stat -c %a /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'700' is equal to '700' -``` - -**Returned Value**: - -```console -700 - -``` -### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). -For example, chown etcd:etcd /var/lib/etcd - -A system service account is required for etcd data directory ownership. -Refer to Rancher's hardening guide for more details on how to configure this ownership. 
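-
-For convenience, the commands below consolidate the ownership setup referenced above. They are an example only: the `52034` uid/gid mirrors the Rancher hardening guide, and any unused uid/gid may be substituted.
-
-```bash
-# Create the etcd system account and give it ownership of the etcd data directory
-groupadd --gid 52034 etcd
-useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
-chown etcd:etcd /var/lib/etcd
-```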
- - -**Audit:** - -```bash -stat -c %U:%G /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'etcd:etcd' is present -``` - -**Returned Value**: - -```console -etcd:etcd - -``` -### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown -R root:root /etc/kubernetes/pki/ - - -**Audit:** - -```bash -check_files_owner_in_dir.sh /node/etc/kubernetes/ssl -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the owner is set to root:root for -# the given directory and all the files in it -# -# inputs: -# $1 = /full/path/to/directory -# -# outputs: -# true/false - -INPUT_DIR=$1 - -if [[ "${INPUT_DIR}" == "" ]]; then - echo "false" - exit -fi - -if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then - echo "false" - exit -fi - -statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) -while read -r statInfoLine; do - f=$(echo ${statInfoLine} | cut -d' ' -f1) - p=$(echo ${statInfoLine} | cut -d' ' -f2) - - if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then - if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "root:root" ]]; then - echo "false" - exit - fi - fi -done <<< "${statInfoLines}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod -R 644 /etc/kubernetes/pki/*.crt - - -**Audit:** - -```bash -check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod -R 600 /etc/kubernetes/ssl/*key.pem - - -**Audit:** - -```bash -check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem 600 -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi' -``` - - -### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml; fi' -``` - - -### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' -``` - - -### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. 
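-
-Because the configuration exists only as container arguments, it can still be reviewed on the node. This is a hedged sketch; it assumes the default RKE container name `kube-controller-manager` and that `jq` is installed.
-
-```bash
-# Print the arguments the RKE controller-manager container was started with
-docker inspect --format '{{ json .Args }}' kube-controller-manager | jq -r '.[]'
-```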
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' -``` - - -### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-scheduler.yaml; fi' -``` - - -### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml; fi' -``` - - -### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/etcd.yaml; fi' -``` - - -### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c %U:%G /etc/kubernetes/manifests/etcd.yaml; fi' -``` - - -### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 - - -**Audit:** - -```bash -stat -c permissions=%a -``` - - -### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root - - -**Audit:** - -```bash -stat -c %U:%G -``` - - -### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi' -``` - - -### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. 
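-
-The kubeconfig files that RKE does generate on the node are stored under `/etc/kubernetes/ssl/` (see controls 4.1.3 through 4.1.6). A quick, optional spot-check of their ownership, assuming a default install path:
-
-```bash
-ls -l /etc/kubernetes/ssl/kubecfg-*.yaml
-```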
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi' -``` - - -### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c permissions=%a scheduler; fi' -``` - - -### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c %U:%G scheduler; fi' -``` - - -### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e controllermanager; then stat -c permissions=%a controllermanager; fi' -``` - - -### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e controllermanager; then stat -c %U:%G controllermanager; fi' -``` - - -## 1.2 API Server -### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---anonymous-auth=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --basic-auth-file= parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--basic-auth-file' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --token-auth-file= parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--token-auth-file' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --kubelet-https parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-https' is not present OR '--kubelet-https' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the -kubelet client certificate and key parameters as below. ---kubelet-client-certificate= ---kubelet-client-key= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the ---kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. ---kubelet-certificate-authority= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-certificate-authority' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. -One such example could be as below. ---authorization-mode=RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console - 'Node,RBAC' not have 'AlwaysAllow' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes Node. ---authorization-mode=Node,RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'Node' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes RBAC, -for example: ---authorization-mode=Node,RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'RBAC' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set the desired limits in a configuration file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameters. ---enable-admission-plugins=...,EventRateLimit,... ---admission-control-config-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --enable-admission-plugins parameter, or set it to a -value that does not include AlwaysAdmit. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console - 'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -AlwaysPullImages. ---enable-admission-plugins=...,AlwaysPullImages,... - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -SecurityContextDeny, unless PodSecurityPolicy is already in place. ---enable-admission-plugins=...,SecurityContextDeny,... 
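As a quick way to act on the two manual checks above, you can list the admission plugins the running API server already enables before deciding whether AlwaysPullImages or SecurityContextDeny needs to be added. This is a minimal sketch built from the same `ps` audit used throughout this section, not part of the benchmark output; in the returned values above, PodSecurityPolicy is already enabled, so SecurityContextDeny is generally not required.

```bash
# Print each admission plugin enabled on the running kube-apiserver, one per line.
/bin/ps -ef | grep kube-apiserver | grep -v grep \
  | grep -o 'enable-admission-plugins=[^ ]*' \
  | cut -d= -f2 | tr ',' '\n'
```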
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and ensure that the --disable-admission-plugins parameter is set to a -value that does not include ServiceAccount. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the 
--disable-admission-plugins parameter to -ensure it does not include NamespaceLifecycle. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes PodSecurityPolicy: ---enable-admission-plugins=...,PodSecurityPolicy,... -Then restart the API Server. 
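Because the PodSecurityPolicy admission plugin rejects every pod when no policy objects exist, it is worth confirming that at least one policy is defined before restarting the API server with the plugin enabled. A minimal check, assuming `kubectl` is configured against this cluster:

```bash
# An empty result here means enabling the PodSecurityPolicy plugin would block
# all pod creation until a policy is created.
kubectl get podsecuritypolicies
```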
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes NodeRestriction. ---enable-admission-plugins=...,NodeRestriction,... 
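The automated API server checks that follow (for example 1.2.18 through 1.2.23) all evaluate the same `ps` output against an expected flag value. The sketch below illustrates that pattern; `check_apiserver_flag` is a hypothetical helper for manual spot checks, not part of the benchmark tooling.

```bash
#!/bin/sh
# Compare a kube-apiserver --flag=value pair against an expected value,
# mirroring the Audit / Expected Result entries in this section.
check_apiserver_flag() {
  flag="$1"; want="$2"
  cmdline=$(/bin/ps -ef | grep kube-apiserver | grep -v grep)
  got=$(echo "$cmdline" | grep -o -- "--${flag}=[^ ]*" | cut -d= -f2)
  if [ "$got" = "$want" ]; then
    echo "PASS: --${flag}=${got}"
  else
    echo "FAIL: --${flag} is '${got:-not set}', expected '${want}'"
  fi
}

# Example spot checks mirroring 1.2.19 and 1.2.21:
check_apiserver_flag insecure-port 0
check_apiserver_flag profiling false
```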
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --insecure-bind-address parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--insecure-bind-address' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---insecure-port=0 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'0' is equal to '0' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --secure-port parameter or -set it to a different (non-zero) desired port. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -6443 is greater than 0 OR '--secure-port' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.21 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-path parameter to a suitable path and -file where you would like audit logs to be written, for example: ---audit-log-path=/var/log/apiserver/audit.log - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-log-path' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: ---audit-log-maxage=30 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -30 is greater or equal to 30 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate -value. ---audit-log-maxbackup=10 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -10 is greater or equal to 10 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. -For example, to set it as 100 MB: ---audit-log-maxsize=100 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -100 is greater or equal to 100 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
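Checks 1.2.22 through 1.2.25 above all grep the same `ps` output for the audit-log flags. As a convenience (this is a sketch, not part of the kube-bench audit), the flags can be isolated so the configured path, age, backup count, and size are readable at a glance:

```bash
# Convenience check only: print just the audit-related flags of the running
# kube-apiserver so the values asserted by 1.2.22-1.2.25 can be read directly.
ps -ef | grep kube-apiserver | grep -v grep \
  | tr ' ' '\n' \
  | grep -E '^--audit-(log|policy)' \
  || echo "no audit flags found"
```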
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameter as appropriate and if needed. -For example, ---request-timeout=300s - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--request-timeout' is not present OR '--request-timeout' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---service-account-lookup=true -Alternatively, you can delete the --service-account-lookup parameter from this file so -that the default takes effect. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --service-account-key-file parameter -to the public key file for service accounts: ---service-account-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-key-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate and key file parameters. ---etcd-certfile= ---etcd-keyfile= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the TLS certificate and private key file parameters. ---tls-cert-file= ---tls-private-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the client certificate authority file. ---client-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--client-ca-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate authority file parameter. ---etcd-cafile= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-cafile' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
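Checks 1.2.29 through 1.2.32 only confirm that the certificate-related flags are present; they say nothing about the files themselves. A quick follow-up, assuming the file paths shown in the audit output above, is to inspect each certificate with `openssl`:

```bash
# Sketch: confirm the certificate and CA files referenced by the kube-apiserver flags
# exist and have sensible subjects and expiry dates. Paths are taken from the audit output above.
for f in /etc/kubernetes/ssl/kube-apiserver.pem \
         /etc/kubernetes/ssl/kube-node.pem \
         /etc/kubernetes/ssl/kube-ca.pem; do
  echo "== $f"
  openssl x509 -noout -subject -issuer -enddate -in "$f"
done
```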
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--encryption-provider-config' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -In this file, choose aescbc, kms or secretbox as the encryption provider. - - -**Audit:** - -```bash -check_encryption_provider_config.sh aescbc kms secretbox -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to check the encrption provider config is set to aesbc -# -# outputs: -# true/false - -# TODO: Figure out the file location from the kube-apiserver commandline args -ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" - -if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then - echo "false" - exit -fi - -for provider in "$@" -do - if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then - echo "true" - exit - fi -done - -echo "false" -exit - -``` -**Returned Value**: - -```console - - aescbc: -true - -``` -### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM -_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM -_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM -_SHA384 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -## 1.3 Controller Manager -### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, -for example: ---terminated-pod-gc-threshold=10 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--terminated-pod-gc-threshold' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.2 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node to set the below parameter. ---use-service-account-credentials=true - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'true' is not equal to 'false' -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --service-account-private-key-file parameter -to the private key file for service accounts. ---service-account-private-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-private-key-file' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --root-ca-file parameter to the certificate bundle file`. ---root-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--root-ca-file' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) - -**Result:** notApplicable - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. ---feature-gates=RotateKubeletServerCertificate=true - -Cluster provisioned by RKE handles certificate rotation directly through RKE. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - - -### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and ensure the correct value for the --bind-address parameter - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--bind-address' is not present OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -## 1.4 Scheduler -### 1.4.1 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=0.0.0.0 - -``` -### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml -on the master node and ensure the correct value for the --bind-address parameter - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'--bind-address' is not present OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=0.0.0.0 - -``` -## 2 Etcd Node Configuration Files -### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml -on the master node and set the below parameters. ---cert-file= ---key-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--cert-file' is present AND '--key-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
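The controller manager and scheduler checks above (1.3.2, 1.3.7, 1.4.1, and 1.4.2) repeat the same process-table grep. A small loop (a convenience sketch, not part of the kube-bench audit) reviews the relevant flags for both components in one pass:

```bash
# Sketch: print the profiling and address/bind-address flags for the controller manager
# and scheduler, mirroring what checks 1.3.2, 1.3.7, 1.4.1 and 1.4.2 assert.
for comp in kube-controller-manager kube-scheduler; do
  echo "== ${comp}"
  ps -ef | grep "${comp}" | grep -v grep \
    | tr ' ' '\n' \
    | grep -E '^--(profiling|bind-address|address)=' \
    || echo "  (no matching flags found)"
done
```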
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml 
--requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---client-cert-auth="true" - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
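With `--client-cert-auth=true` (check 2.2), etcd should only accept client connections that present a certificate signed by the trusted CA. Assuming `etcdctl` is available on the node (on RKE nodes the simplest option is usually to run it inside the etcd container, e.g. `docker exec etcd etcdctl ...`), this can be exercised directly using the client certificate pair visible in the audit output above:

```bash
# Sketch: a request made with the client certificate pair should succeed...
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/ssl/kube-ca.pem \
  --cert=/etc/kubernetes/ssl/kube-node.pem \
  --key=/etc/kubernetes/ssl/kube-node-key.pem \
  endpoint health

# ...while the same request without a client certificate is expected to be rejected.
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/ssl/kube-ca.pem \
  endpoint health || echo "connection rejected as expected"
```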
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml 
--requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameters. ---peer-client-file= ---peer-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---peer-client-cert-auth=true - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml 
--requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --peer-auto-tls parameter or set it to false. ---peer-auto-tls=false - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) - -**Result:** pass - -**Remediation:** -[Manual test] -Follow the etcd documentation and create a dedicated certificate authority setup for the -etcd service. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameter. ---trusted-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--trusted-ca-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml 
--requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -## 3.1 Authentication and Authorization -### 3.1.1 Client certificate authentication should not be used for users (Manual) - -**Result:** warn - -**Remediation:** -Alternative mechanisms provided by Kubernetes such as the use of OIDC should be -implemented in place of client certificates. - - -**Audit:** - -```bash - -``` - - -## 3.2 Logging -### 3.2.1 Ensure that a minimal audit policy is created (Automated) - -**Result:** pass - -**Remediation:** -Create an audit policy file for your cluster. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-policy-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false 
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) - -**Result:** warn - -**Remediation:** -Consider modification of the audit policy in use on the cluster to include these items, at a -minimum. - - -**Audit:** - -```bash - -``` - - -## 4.1 Worker Node Configuration Files -### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' -``` - - -### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' -``` - - -### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, -chmod 644 $proxykubeconfig - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -**Returned Value**: - -```console -600 - -``` -### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present -``` - -### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. 
-For example, -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root - -``` -### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the following command to modify the file permissions of the ---client-ca-file chmod 644 - - -**Audit:** - -```bash -check_cafile_permissions.sh -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi - -``` -### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the following command to modify the ownership of the --client-ca-file. -chown root:root - - -**Audit:** - -```bash -check_cafile_ownership.sh -``` - -**Expected Result**: - -```console -'root:root' is not present -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi - -``` -### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chmod 644 /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c permissions=%a /var/lib/kubelet/config.yaml; fi' -``` - - -### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chown root:root /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi' -``` - - -## 4.2 Kubelet -### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to -false. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---anonymous-auth=false -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If -using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---authorization-mode=Webhook -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---client-ca-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set readOnlyPort to 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---read-only-port=0 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a -value other than 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---streaming-connection-idle-timeout=5m -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD -root 5103 5086 7 16:16 ? 
00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf - -``` -### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set protectKernelDefaults: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---protect-kernel-defaults=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove the --make-iptables-util-chains argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) - -**Result:** notApplicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and remove the --hostname-override argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - - -### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set tlsCertFile to the location -of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile -to the location of the corresponding private key file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameters in KUBELET_CERTIFICATE_ARGS variable. ---tls-cert-file= ---tls-private-key-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present AND '' is not present -``` - -### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to add the line rotateCertificates: true or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS -variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'--rotate-certificates' is not present OR '--rotate-certificates' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD -root 5103 5086 6 16:16 ? 
00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf - -``` -### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) - -**Result:** notApplicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. ---feature-gates=RotateKubeletServerCertificate=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE handles certificate rotation directly through RKE. - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - - -### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set TLSCipherSuites: to -TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -or to a subset of these values. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the --tls-cipher-suites parameter as follows, or to a subset of these values. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -## 5.1 RBAC and Service Accounts -### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) - -**Result:** warn - -**Remediation:** -Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and -if they need this role or if they could use a role with fewer privileges. -Where possible, first bind users to a lower privileged role and then remove the -clusterrolebinding to the cluster-admin role : -kubectl delete clusterrolebinding [name] - - -**Audit:** - -```bash - -``` - - -### 5.1.2 Minimize access to secrets (Manual) - -**Result:** warn - -**Remediation:** -Where possible, remove get, list and watch access to secret objects in the cluster. - - -**Audit:** - -```bash - -``` - - -### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) - -**Result:** warn - -**Remediation:** -Where possible replace any use of wildcards in clusterroles and roles with specific -objects or actions. - - -**Audit:** - -```bash - -``` - - -### 5.1.4 Minimize access to create pods (Manual) - -**Result:** warn - -**Remediation:** -Where possible, remove create access to pod objects in the cluster. - - -**Audit:** - -```bash - -``` - - -### 5.1.5 Ensure that default service accounts are not actively used. (Automated) - -**Result:** pass - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value -automountServiceAccountToken: false - - -**Audit:** - -```bash -check_for_default_sa.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) -if [[ ${count_sa} -gt 0 ]]; then - echo "false" - exit -fi - -for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") -do - for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') - do - read kind name <<<$(IFS=","; echo $result) - resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) - if [[ ${resource_count} -gt 0 ]]; then - echo "false" - exit - fi - done -done - - -echo "true" -``` -**Returned Value**: - -```console -true - -``` -### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) - -**Result:** warn - -**Remediation:** -Modify the definition of pods and service accounts which do not need to mount service -account tokens to disable it. - - -**Audit:** - -```bash - -``` - - -## 5.2 Pod Security Policies -### 5.2.1 Minimize the admission of privileged containers (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that -the .spec.privileged field is omitted or set to false. - - -**Audit:** - -```bash - -``` - - -### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostPID field is omitted or set to false. 
- - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostIPC field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostNetwork field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.allowPrivilegeEscalation field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.6 Minimize the admission of root containers (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of -UIDs not including 0. - - -**Audit:** - -```bash - -``` - - -### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - - -**Audit:** - -```bash - -``` - - -### 5.2.8 Minimize the admission of containers with added capabilities (Manual) - -**Result:** warn - -**Remediation:** -Ensure that allowedCapabilities is not present in PSPs for the cluster unless -it is set to an empty array. - - -**Audit:** - -```bash - -``` - - -### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) - -**Result:** warn - -**Remediation:** -Review the use of capabilities in applications running on your cluster. Where a namespace -contains applications which do not require any Linux capabilities to operate, consider adding -a PSP which forbids the admission of containers which do not drop all capabilities. 
- - -**Audit:** - -```bash - -``` - - -## 5.3 Network Policies and CNI -### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) - -**Result:** warn - -**Remediation:** -If the CNI plugin in use does not support network policies, consideration should be given to -making use of a different plugin, or finding an alternate mechanism for restricting traffic -in the Kubernetes cluster. - - -**Audit:** - -```bash - -``` - - -### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create NetworkPolicy objects as you need them. - - -**Audit:** - -```bash -check_for_network_policies.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [[ ${policy_count} -eq 0 ]]; then - echo "false" - exit - fi -done - -echo "true" - -``` -**Returned Value**: - -```console -true - -``` -## 5.4 Secrets Management -### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) - -**Result:** warn - -**Remediation:** -if possible, rewrite application code to read secrets from mounted secret files, rather than -from environment variables. - - -**Audit:** - -```bash - -``` - - -### 5.4.2 Consider external secret storage (Manual) - -**Result:** warn - -**Remediation:** -Refer to the secrets management options offered by your cloud provider or a third-party -secrets management solution. - - -**Audit:** - -```bash - -``` - - -## 5.5 Extensible Admission Control -### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and setup image provenance. - - -**Audit:** - -```bash - -``` - - -## 5.7 General Policies -### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) - -**Result:** warn - -**Remediation:** -Follow the documentation and create namespaces for objects in your deployment as you need -them. - - -**Audit:** - -```bash - -``` - - -### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) - -**Result:** warn - -**Remediation:** -Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you -would need to enable alpha features in the apiserver by passing "--feature- -gates=AllAlpha=true" argument. -Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS -parameter to "--feature-gates=AllAlpha=true" -KUBE_API_ARGS="--feature-gates=AllAlpha=true" -Based on your system, restart the kube-apiserver service. For example: -systemctl restart kube-apiserver.service -Use annotations to enable the docker/default seccomp profile in your pod definitions. An -example is as below: -apiVersion: v1 -kind: Pod -metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default -spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - - -**Audit:** - -```bash - -``` - - -### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and apply security contexts to your pods. 
For a -suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker -Containers. - - -**Audit:** - -```bash - -``` - - -### 5.7.4 The default namespace should not be used (Automated) - -**Result:** pass - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - - -**Audit:** - -```bash -check_for_default_ns.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) -if [[ ${count} -gt 0 ]]; then - echo "false" - exit -fi - -echo "true" - - -``` -**Returned Value**: - -```console -true - -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/_index.md deleted file mode 100644 index 82970d87e..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/_index.md +++ /dev/null @@ -1,571 +0,0 @@ ---- -title: Hardening Guide with CIS 1.6 Benchmark -weight: 100 ---- - -This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used for RKE clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - - Rancher Version | CIS Benchmark Version | Kubernetes Version -----------------|-----------------------|------------------ - Rancher v2.5.4 | Benchmark 1.6 | Kubernetes v1.18 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.6.pdf) - -### Overview - -This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.5.4 with Kubernetes v1.18 or provisioning a RKE cluster with Kubernetes v1.18 to be used within Rancher v2.5.4. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4]({{< baseurl >}}/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/). - -#### Known Issues - -- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.6 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. -- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.6 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. 
In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments.
-
-- Migrating Rancher from v2.4 to v2.5: add-ons were removed in the v2.5 hardening guide, and therefore namespaces may not be created on the downstream clusters during migration. Pods may fail to run because of missing namespaces such as `ingress-nginx` and `cattle-system`.
-
-
-### Configure Kernel Runtime Parameters
-
-The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
-
-```ini
-vm.overcommit_memory=1
-vm.panic_on_oom=0
-kernel.panic=10
-kernel.panic_on_oops=1
-kernel.keys.root_maxbytes=25000000
-```
-
-Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
-
-### Configure `etcd` user and group
-A user account and group for the **etcd** service is required to be set up before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation.
-
-#### Create `etcd` user and group
-To create the **etcd** user and group, run the following console commands.
-
-The commands below use `52034` for the **uid** and **gid** as an example. Any valid, unused **uid** or **gid** could also be used in lieu of `52034`.
-
-```bash
-groupadd --gid 52034 etcd
-useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
-```
-
-Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
-
-```yaml
-services:
-  etcd:
-    gid: 52034
-    uid: 52034
-```
-
-#### Set `automountServiceAccountToken` to `false` for `default` service accounts
-Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
-
-For each namespace, including **default** and **kube-system**, on a standard RKE install, the **default** service account must include this value:
-
-```yaml
-automountServiceAccountToken: false
-```
-
-Save the following YAML to a file called `account_update.yaml`:
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: default
-automountServiceAccountToken: false
-```
-
-Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions.
-
-```bash
-#!/bin/bash -e
-
-for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
-  kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
-done
-```
-
-### Ensure that all Namespaces have Network Policies defined
-
-Running different applications on the same Kubernetes cluster creates a risk of one
-compromised application attacking a neighboring application. Network segmentation is
-important to ensure that containers can communicate only with those they are supposed
-to. A network policy is a specification of how selections of pods are allowed to
-communicate with each other and other network endpoints.
-
-Network Policies are namespace scoped. When a network policy is introduced to a given
-namespace, all traffic not allowed by the policy is denied.
However, if there are no network
-policies in a namespace, all traffic will be allowed into and out of the pods in that
-namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled.
-This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement.
-Additional information about CNI providers can be found
-[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
-
-Once a CNI provider is enabled on a cluster, a default network policy can be applied. For reference purposes, a
-**permissive** example is provided below. If you want to allow all traffic to all pods in a namespace
-(even if policies are added that cause some pods to be treated as “isolated”),
-you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as
-`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-about network policies can be found on the Kubernetes site.
-
-> This `NetworkPolicy` is not recommended for production use
-
-```yaml
----
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: default-allow-all
-spec:
-  podSelector: {}
-  ingress:
-  - {}
-  egress:
-  - {}
-  policyTypes:
-  - Ingress
-  - Egress
-```
-
-Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to
-`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions.
-
-```bash
-#!/bin/bash -e
-
-for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
-  kubectl apply -f default-allow-all.yaml -n ${namespace}
-done
-```
-
-Execute this script to apply the **permissive** `default-allow-all.yaml` `NetworkPolicy` to all namespaces.
-
-### Reference Hardened RKE `cluster.yml` configuration
-
-The reference `cluster.yml` is used by the RKE CLI and provides the configuration needed to achieve a hardened install
-of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is
-provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive, which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes
-
-
-```yaml
-# If you intend to deploy Kubernetes in an air-gapped environment,
-# please consult the documentation on how to configure custom RKE images.
-# https://rancher.com/docs/rke/latest/en/installation/ - -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: [] -services: - etcd: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - external_urls: [] - ca_cert: "" - cert: "" - key: "" - path: "" - uid: 52034 - gid: 52034 - snapshot: false - retention: "" - creation: "" - backup_config: null - kube-api: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - service_cluster_ip_range: "" - service_node_port_range: "" - pod_security_policy: true - always_pull_images: false - secrets_encryption_config: - enabled: true - custom_config: null - audit_log: - enabled: true - configuration: null - admission_configuration: null - event_rate_limit: - enabled: true - configuration: null - kube-controller: - image: "" - extra_args: - feature-gates: RotateKubeletServerCertificate=true - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - cluster_cidr: "" - service_cluster_ip_range: "" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - kubelet: - image: "" - extra_args: - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - cluster_domain: cluster.local - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} - update_strategy: null -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - 
metadata: - name: default-allow-all - spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: default - automountServiceAccountToken: false -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - nodelocal: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_controllers: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -kubernetes_version: v1.18.12-rancher1-1 -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] - update_strategy: null - http_port: 0 - https_port: 0 - network_mode: "" -cluster_name: -cloud_provider: - name: "" -prefix_path: "" -win_prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} - update_strategy: null - replicas: null -restore: - restore: false - snapshot_name: "" -dns: null -upgrade_strategy: - max_unavailable_worker: "" - max_unavailable_controlplane: "" - drain: null - node_drain_input: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.x/en/installation) for additional installation and RKE Template details. 
- -```yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 45 - ignore_docker_version: true - kubernetes_version: v1.18.12-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal - rotate_encryption_key: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - feature-gates: RotateKubeletServerCertificate=true - kubelet: - extra_args: - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - ssh_agent_auth: false - upgrade_strategy: - max_unavailable_controlplane: '1' - max_unavailable_worker: 10% -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 20.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
- -```yaml -#cloud-config -apt: - sources: - docker.list: - source: deb [arch=amd64] http://download.docker.com/linux/ubuntu $RELEASE stable - keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 -system_info: - default_user: - groups: - - docker -write_files: -- path: "/etc/apt/preferences.d/docker" - owner: root:root - permissions: '0600' - content: | - Package: docker-ce - Pin: version 5:19* - Pin-Priority: 800 -- path: "/etc/sysctl.d/90-kubelet.conf" - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxbytes=25000000 -package_update: true -packages: -- docker-ce -- docker-ce-cli -- containerd.io -runcmd: -- sysctl -p /etc/sysctl.d/90-kubelet.conf -- groupadd --gid 52034 etcd -- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` diff --git a/content/rancher/v2.x/en/security/rancher-2.5/_index.md b/content/rancher/v2.x/en/security/rancher-2.5/_index.md deleted file mode 100644 index afff1ac7d..000000000 --- a/content/rancher/v2.x/en/security/rancher-2.5/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Rancher v2.5 -weight: 1 ---- - -Rancher v2.5 introduced the capability to deploy Rancher on any Kubernetes cluster. For that reason, we now provide separate security hardening guides for Rancher deployments on each of Rancher's Kubernetes distributions. - -Rancher has the following Kubernetes distributions: - -- [**RKE,**]({{}}/rke/latest/en/) Rancher Kubernetes Engine, is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. -- [**K3s,**]({{}}/k3s/latest/en/) is a fully conformant, lightweight Kubernetes distribution. It is easy to install, with half the memory of upstream Kubernetes, all in a binary of less than 100 MB. -- [**RKE2**](https://docs.rke2.io/) is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. - -To harden a Kubernetes cluster outside of Rancher's distributions, refer to your Kubernetes provider docs. - -# Guides - -These guides have been tested along with the Rancher v2.5 release. Each self-assessment guide is accompanied with a hardening guide and tested on a specific Kubernetes version and CIS benchmark version. If a CIS benchmark has not been validated for your Kubernetes version, you can choose to use the existing guides until a newer version is added. - -### RKE Guides - -Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides ----|---|---|--- -Kubernetes v1.15+ | CIS v1.5 | [Link](./1.5-benchmark-2.5) | [Link](./1.5-hardening-2.5) -Kubernetes v1.18+ | CIS v1.6 | [Link](./1.6-benchmark-2.5) | [Link](./1.6-hardening-2.5) - -### RKE2 Guides - -Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides ----|---|---|--- -Kubernetes v1.18 | CIS v1.5 | [Link](https://docs.rke2.io/security/cis_self_assessment15/) | [Link](https://docs.rke2.io/security/hardening_guide/) -Kubernetes v1.20 | CIS v1.6 | [Link](https://docs.rke2.io/security/cis_self_assessment16/) | [Link](https://docs.rke2.io/security/hardening_guide/) - -### K3s Guides - -The K3s security guides will be added soon. 
diff --git a/content/rancher/v2.x/en/security/security-scan/_index.md b/content/rancher/v2.x/en/security/security-scan/_index.md deleted file mode 100644 index 644b7f906..000000000 --- a/content/rancher/v2.x/en/security/security-scan/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Security Scans -weight: 299 ---- - -The documentation about CIS security scans has moved [here.]({{}}/rancher/v2.x/en/cis-scans) diff --git a/content/rancher/v2.x/en/system-tools/_index.md b/content/rancher/v2.x/en/system-tools/_index.md deleted file mode 100644 index c268e997a..000000000 --- a/content/rancher/v2.x/en/system-tools/_index.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: System Tools -weight: 22 ---- - -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) The tasks include: - -* Collect logging and system metrics from nodes. -* Remove Kubernetes resources created by Rancher. - -The following commands are available: - -| Command | Description -|---|--- -| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. -| [stats](#stats) | Stream system metrics from nodes. -| [remove](#remove) | Remove Kubernetes resources created by Rancher. - -# Download System Tools - -You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. - -Operating System | Filename ------------------|----- -MacOS | `system-tools_darwin-amd64` -Linux | `system-tools_linux-amd64` -Windows | `system-tools_windows-amd64.exe` - -After you download the tools, complete the following actions: - -1. Rename the file to `system-tools`. - -1. Give the file executable permissions by running the following command: - - > **Using Windows?** - The file is already an executable, you can skip this step. - - ``` - chmod +x system-tools - ``` - -# Logs - -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/). See [Troubleshooting]({{}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. - -System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. - -### Usage - -``` -./system-tools_darwin-amd64 logs --kubeconfig -``` - -The following are the options for the logs command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. -| `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. 
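-
-For example, to collect the logs from a single node into a tarball with a custom name, the options above can be combined as follows (the kubeconfig file, node name, and output filename are placeholder values; substitute your own):
-
-```
-./system-tools logs --kubeconfig kube_config_cluster.yml --node worker-1 --output worker-1-logs.tar
-```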
-
-# Stats
-
-The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/).
-
-System Tools will deploy a DaemonSet and run a predefined command based on `sar` (System Activity Report) to show system metrics.
-
-### Usage
-
-```
-./system-tools_darwin-amd64 stats --kubeconfig 
-```
-
-The following are the options for the stats command:
-
-| Option | Description
-| ------------------------------------------------------ | ------------------------------
-| `--kubeconfig , -c ` | The cluster's kubeconfig file.
-| `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, metrics from all nodes in the cluster will be displayed.
-| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the option defaults to `/usr/bin/sar -u -r -F 1 1`.
-
-# Remove
-
->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command.
-
-When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed:
-
-- The Rancher deployment namespace (`cattle-system` by default).
-- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates as of v2.1.0.
-- Labels, annotations, and finalizers.
-- Rancher Deployment.
-- Machines, clusters, projects, and user custom resource definitions (CRDs).
-- All resources created under the `management.cattle.io` API Group.
-- All CRDs created by Rancher v2.x.
-
->**Using 2.0.8 or Earlier?**
->
->These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself.
-
-### Usage
-
-When you run the command below, all the resources listed [above](#remove) will be removed from the cluster.
-
->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.x/en/backups/backups) before executing the command.
-
-```
-./system-tools remove --kubeconfig --namespace 
-```
-
-The following are the options for the `remove` command:
-
-| Option | Description
-| ---------------------------------------------- | ------------
-| `--kubeconfig , -c ` | The cluster's kubeconfig file
-| `--namespace , -n cattle-system` | Rancher 2.x deployment namespace (``). If no namespace is defined, the option defaults to `cattle-system`.
-| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompt.
diff --git a/content/rancher/v2.x/en/troubleshooting/_index.md b/content/rancher/v2.x/en/troubleshooting/_index.md
deleted file mode 100644
index b30b15908..000000000
--- a/content/rancher/v2.x/en/troubleshooting/_index.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Troubleshooting
-weight: 26
----
-
-This section contains information to help you troubleshoot issues when using Rancher.
- -- [Kubernetes components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/) - - If you need help troubleshooting core Kubernetes cluster components like: - * `etcd` - * `kube-apiserver` - * `kube-controller-manager` - * `kube-scheduler` - * `kubelet` - * `kube-proxy` - * `nginx-proxy` - -- [Kubernetes resources]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/) - - Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. - -- [Networking]({{}}/rancher/v2.x/en/troubleshooting/networking/) - - Steps to troubleshoot networking issues can be found here. - -- [DNS]({{}}/rancher/v2.x/en/troubleshooting/dns/) - - When you experience name resolution issues in your cluster. - -- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.x/en/troubleshooting/rancherha/) - - If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) - -- [Imported clusters]({{}}/rancher/v2.x/en/troubleshooting/imported-clusters/) - - If you experience issues when [Importing Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) - -- [Logging]({{}}/rancher/v2.x/en/troubleshooting/logging/) - - Read more about what log levels can be configured and how to configure a log level. - diff --git a/content/rancher/v2.x/en/troubleshooting/dns/_index.md b/content/rancher/v2.x/en/troubleshooting/dns/_index.md deleted file mode 100644 index 4acec0b3c..000000000 --- a/content/rancher/v2.x/en/troubleshooting/dns/_index.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: DNS -weight: 103 ---- - -The commands/steps listed on this page can be used to check name resolution issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. - -### Check if DNS pods are running - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns -``` - -Example output when using CoreDNS: -``` -NAME READY STATUS RESTARTS AGE -coredns-799dffd9c4-6jhlz 1/1 Running 0 76m -``` - -Example output when using kube-dns: -``` -NAME READY STATUS RESTARTS AGE -kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s -``` - -### Check if the DNS service is present with the correct cluster-ip - -``` -kubectl -n kube-system get svc -l k8s-app=kube-dns -``` - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s -``` - -### Check if domain names are resolving - -Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. 
- -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: kubernetes.default -Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local -pod "busybox" deleted -``` - -Check if external names are resolving (in this example, `www.google.com`) - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: www.google.com -Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net -Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net -pod "busybox" deleted -``` - -If you want to check resolving of domain names on all of the hosts, execute the following steps: - -1. Save the following file as `ds-dnstest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: dnstest - spec: - selector: - matchLabels: - name: dnstest - template: - metadata: - labels: - name: dnstest - spec: - tolerations: - - operator: Exists - containers: - - image: busybox:1.28 - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl create -f ds-dnstest.yml` -3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. -4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). - - ``` - export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start DNS resolve test - => End DNS resolve test - ``` - -If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. - -Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. - -``` -=> Start DNS resolve test -command terminated with exit code 1 -209.97.182.150 cannot resolve www.google.com -=> End DNS resolve test -``` - -Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. - -### CoreDNS specific - -#### Check CoreDNS logging - -``` -kubectl -n kube-system logs -l k8s-app=kube-dns -``` - -#### Check configuration - -CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. - -``` -kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} -``` - -#### Check upstream nameservers in resolv.conf - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
-
-```
-kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf'
-```
-
-#### Enable query logging
-
-Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place:
-
-```
-kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f -
-```
-
-All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging).
-
-### kube-dns specific
-
-#### Check upstream nameservers in kubedns container
-
-By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In the case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`).
-
-Use the following command to check the upstream nameservers used by the kubedns container:
-
-```
-kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done
-```
-
-Example output:
-```
-Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x
-nameserver 1.1.1.1
-nameserver 8.8.4.4
-```
-
-If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways:
-
-* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster; please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification.
-* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers):
-
-```
-services:
-  kubelet:
-    extra_args:
-      resolv-conf: "/run/resolvconf/resolv.conf"
-```
-
-> **Note:** As the `kubelet` is running inside a container, the paths for files located in `/etc` and `/usr` are `/host/etc` and `/host/usr` inside the `kubelet` container.
-
-See [Editing Cluster as YAML]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml) for how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod:
-
-```
-kubectl delete pods -n kube-system -l k8s-app=kube-dns
-pod "kube-dns-5fd74c7488-6pwsf" deleted
-```
-
-Try to resolve the name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving).
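-
-To verify that the `kubelet` actually picked up the alternative file, one quick check (a sketch, relying on the `kubelet` container that RKE creates on each node) is to look for the `resolv-conf` flag in the container arguments on the node:
-
-```
-docker inspect kubelet | grep resolv-conf
-```
-
-The output should reference the file configured via `extra_args` instead of the default `/etc/resolv.conf`.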
- -If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: - -``` -kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' -``` - -Example output: -``` -upstreamNameservers:["1.1.1.1"] -``` diff --git a/content/rancher/v2.x/en/troubleshooting/imported-clusters/_index.md b/content/rancher/v2.x/en/troubleshooting/imported-clusters/_index.md deleted file mode 100644 index 87aba226c..000000000 --- a/content/rancher/v2.x/en/troubleshooting/imported-clusters/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Registered clusters -weight: 105 ---- - -The commands/steps listed on this page can be used to check clusters that you are importing or that are imported in Rancher. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) - -### Rancher agents - -Communication to the cluster (Kubernetes API via cattle-cluster-agent) and communication to the nodes is done through Rancher agents. - -If the cattle-cluster-agent cannot connect to the configured `server-url`, the cluster will remain in **Pending** state, showing `Waiting for full cluster configuration`. - -#### cattle-node-agent - -> Note: Starting in Rancher 2.5 cattle-node-agents are only present in clusters created in Rancher with RKE. - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md deleted file mode 100644 index d2e32f915..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Kubernetes Components -weight: 100 ---- - -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. 
- -This section includes troubleshooting tips in the following categories: - -- [Troubleshooting etcd Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd) -- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane) -- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy) -- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic) - -# Kubernetes Component Diagram - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md deleted file mode 100644 index 1ca42591c..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Troubleshooting Controlplane Nodes -weight: 2 ---- - -This section applies to nodes with the `controlplane` role. - -# Check if the Controlplane Containers are Running - -There are three specific containers launched on nodes with the `controlplane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -# Controlplane Container Logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md deleted file mode 100644 index f83d241a0..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -title: Troubleshooting etcd Nodes -weight: 1 ---- - -This section contains commands and tips for troubleshooting nodes with the `etcd` role. 
- -This page covers the following topics: - -- [Checking if the etcd Container is Running](#checking-if-the-etcd-container-is-running) -- [etcd Container Logging](#etcd-container-logging) -- [etcd Cluster and Connectivity Checks](#etcd-cluster-and-connectivity-checks) - - [Check etcd Members on all Nodes](#check-etcd-members-on-all-nodes) - - [Check Endpoint Status](#check-endpoint-status) - - [Check Endpoint Health](#check-endpoint-health) - - [Check Connectivity on Port TCP/2379](#check-connectivity-on-port-tcp-2379) - - [Check Connectivity on Port TCP/2380](#check-connectivity-on-port-tcp-2380) -- [etcd Alarms](#etcd-alarms) -- [etcd Space Errors](#etcd-space-errors) -- [Log Level](#log-level) -- [etcd Content](#etcd-content) - - [Watch Streaming Events](#watch-streaming-events) - - [Query etcd Directly](#query-etcd-directly) -- [Replacing Unhealthy etcd Nodes](#replacing-unhealthy-etcd-nodes) - -# Checking if the etcd Container is Running - -The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name=etcd$ -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd -``` - -# etcd Container Logging - -The logging of the container can contain information on what the problem could be. - -``` -docker logs etcd -``` -| Log | Explanation | -|-----|------------------| -| `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` | A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. | -| `xxx is starting a new election at term x` | The etcd cluster has lost its quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. | -| `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` | The host firewall is preventing network communication. | -| `rafthttp: request cluster ID mismatch` | The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. | -| `rafthttp: failed to find member` | The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. - -# etcd Cluster and Connectivity Checks - -The address where etcd is listening depends on the address configuration of the host etcd is running on. If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster) - -### Check etcd Members on all Nodes - -Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. 
- -Command: -``` -docker exec etcd etcdctl member list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" -``` - -Example output: -``` -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -``` - -### Check Endpoint Status - -The values for `RAFT TERM` should be equal and `RAFT INDEX` should be not be too far apart from each other. - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | -| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | -| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -### Check Endpoint Health - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint health -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Example output: -``` -https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms -``` - -### Check Connectivity on Port TCP/2379 - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health" - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do 
- echo "Validating connection to ${endpoint}/health"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Example output: -``` -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -``` - -### Check Connectivity on Port TCP/2380 - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Example output: -``` -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -``` - -# etcd Alarms - -etcd will trigger alarms, for instance when it runs out of space. - -Command: -``` -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output when NOSPACE alarm is triggered: -``` -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -``` - -# etcd Space Errors - -Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
- -Resolutions: - -- [Compact the Keyspace](#compact-the-keyspace) -- [Defrag All etcd Members](#defrag-all-etcd-members) -- [Check Endpoint Status](#check-endpoint-status) -- [Disarm Alarm](#disarm-alarm) - -### Compact the Keyspace - -Command: -``` -rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') -docker exec etcd etcdctl compact "$rev" -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" -``` - -Example output: -``` -compacted revision xxx -``` - -### Defrag All etcd Members - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl defrag -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" -``` - -Example output: -``` -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -``` - -### Check Endpoint Status - -Command: -``` -docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -### Disarm Alarm - -After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
- -Command: -``` -docker exec etcd etcdctl alarm list -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output: -``` -docker exec etcd etcdctl alarm list -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -# Log Level - -The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. - -Command: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log -``` - -To reset the log level back to the default (`INFO`), you can use the following command. - -Command: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log -``` - -# etcd Content - -If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. 
- -### Watch Streaming Events - -Command: -``` -docker exec etcd etcdctl watch --prefix /registry -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry -``` - -If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. - -### Query etcd Directly - -Command: -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only -``` - -You can process the data to get a summary of count per key, using the command below: - -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr -``` - -# Replacing Unhealthy etcd Nodes - -When a node in your etcd cluster becomes unhealthy, the recommended approach is to fix or remove the failed or unhealthy node before adding a new etcd node to the cluster. diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md deleted file mode 100644 index 70505e962..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Troubleshooting nginx-proxy -weight: 3 ---- - -The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. - -# Check if the Container is Running - -The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name=nginx-proxy -``` - -Example output: - -``` -docker ps -a -f=name=nginx-proxy -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy -``` - -# Check Generated NGINX Configuration - -The generated configuration should include the IP addresses of the nodes with the `controlplane` role. The configuration can be checked using the following command: - -``` -docker exec nginx-proxy cat /etc/nginx/nginx.conf -``` - -Example output: -``` -error_log stderr notice; - -worker_processes auto; -events { - multi_accept on; - use epoll; - worker_connections 1024; -} - -stream { - upstream kube_apiserver { - - server ip_of_controlplane_node1:6443; - - server ip_of_controlplane_node2:6443; - - } - - server { - listen 6443; - proxy_pass kube_apiserver; - proxy_timeout 30; - proxy_connect_timeout 2s; - - } - -} -``` - -# nginx-proxy Container Logging - -The logging of the containers can contain information on what the problem could be. 
- -``` -docker logs nginx-proxy -``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md deleted file mode 100644 index 28ee4499b..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Troubleshooting Worker Nodes and Generic Components -weight: 4 ---- - -This section applies to every node as it includes components that run on nodes with any role. - -# Check if the Containers are Running - -There are two specific containers launched on nodes with the `worker` role: - -* kubelet -* kube-proxy - -The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name='kubelet|kube-proxy' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy -a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet -``` - -# Container Logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kubelet -docker logs kube-proxy -``` diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md deleted file mode 100644 index e76ad558c..000000000 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Kubernetes resources -weight: 101 ---- - -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -- [Nodes](#nodes) - - [Get nodes](#get-nodes) - - [Get node conditions](#get-node-conditions) -- [Kubernetes leader election](#kubernetes-leader-election) - - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) - - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) -- [Ingress controller](#ingress-controller) - - [Pod details](#pod-details) - - [Pod container logs](#pod-container-logs) - - [Namespace events](#namespace-events) - - [Debug logging](#debug-logging) - - [Check configuration](#check-configuration) -- [Rancher agents](#rancher-agents) - - [cattle-node-agent](#cattle-node-agent) - - [cattle-cluster-agent](#cattle-cluster-agent) -- [Jobs and pods](#jobs-and-pods) - - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) - - [Describe pod](#describe-pod) - - [Pod container logs](#pod-container-logs) - - [Describe job](#describe-job) - - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) - - [Evicted pods](#evicted-pods) - - [Job does not complete](#job-does-not-complete) - -# Nodes - -### Get nodes - -Run the command below and check the following: - -- All nodes in your cluster should be listed, make sure there is not one missing. 
-- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) -- Check if all nodes report the correct version. -- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) - - -``` -kubectl get nodes -o wide -``` - -Example output: - -``` -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -``` - -### Get node conditions - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' -``` - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' -``` - -Example output: - -``` -worker-0: DiskPressure:True -``` - -# Kubernetes leader election - -### Kubernetes Controller Manager leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). - -``` -kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -### Kubernetes Scheduler leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). - -``` -kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -# Ingress Controller - -The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. 
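Because the controller runs as a DaemonSet, it can also be useful to confirm that the DaemonSet itself reports the expected number of desired and ready pods before inspecting individual pods. A minimal check, assuming the default object name used by RKE (the same `nginx-ingress-controller` name referenced in the debug logging command further down):

```
kubectl -n ingress-nginx get daemonset nginx-ingress-controller
```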
- -Check if the pods are running on all nodes: - -``` -kubectl -n ingress-nginx get pods -o wide -``` - -Example output: - -``` -kubectl -n ingress-nginx get pods -o wide -NAME READY STATUS RESTARTS AGE IP NODE -default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 -nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 -nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. - -### Pod details - -``` -kubectl -n ingress-nginx describe pods -l app=ingress-nginx -``` - -### Pod container logs - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Namespace events - -``` -kubectl -n ingress-nginx get events -``` - -### Debug logging - -To enable debug logging: - -``` -kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' -``` - -### Check configuration - -Retrieve generated configuration in each pod: - -``` -kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done -``` - -# Rancher agents - -Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. - -#### cattle-node-agent - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` - -# Jobs and Pods - -### Check that pods or jobs have status **Running**/**Completed** - -To check, run the command: - -``` -kubectl get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -### Describe pod - -``` -kubectl describe pod POD_NAME -n NAMESPACE -``` - -### Pod container logs - -``` -kubectl logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -### Describe job - -``` -kubectl describe job JOB_NAME -n NAMESPACE -``` - -### Logs from the containers of pods 
of the job - -``` -kubectl logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Evicted pods - -Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). - -Retrieve a list of evicted pods (podname and namespace): - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' -``` - -To delete all evicted pods: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done -``` - -Retrieve a list of evicted pods, scheduled node and the reason: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done -``` - -### Job does not complete - -If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.](../../cluster-admin/tools/istio/setup/enable-istio-in-namespace/#excluding-workloads-from-being-injected-with-the-istio-sidecar) - -Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/logging/_index.md b/content/rancher/v2.x/en/troubleshooting/logging/_index.md deleted file mode 100644 index 0c038f81e..000000000 --- a/content/rancher/v2.x/en/troubleshooting/logging/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Logging -weight: 110 ---- - -The following log levels are used in Rancher: - -| Name | Description | -|---------|-------------| -| `info` | Logs informational messages. This is the default log level. | -| `debug` | Logs more detailed messages that can be used to debug. | -| `trace` | Logs very detailed messages on internal functions. This is very verbose and can contain sensitive information. 
| - -### How to configure a log level - -* Kubernetes install - * Configure debug log level -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl -n cattle-system get pods -l app=rancher --no-headers -o custom-columns=name:.metadata.name | while read rancherpod; do kubectl -n cattle-system exec $rancherpod -c rancher -- loglevel --set debug; done -OK -OK -OK -$ kubectl -n cattle-system logs -l app=rancher -c rancher -``` - - * Configure info log level -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl -n cattle-system get pods -l app=rancher --no-headers -o custom-columns=name:.metadata.name | while read rancherpod; do kubectl -n cattle-system exec $rancherpod -c rancher -- loglevel --set info; done -OK -OK -OK -``` - -* Docker Install - * Configure debug log level -``` -$ docker exec -ti loglevel --set debug -OK -$ docker logs -f -``` - - * Configure info log level -``` -$ docker exec -ti loglevel --set info -OK -``` diff --git a/content/rancher/v2.x/en/troubleshooting/networking/_index.md b/content/rancher/v2.x/en/troubleshooting/networking/_index.md deleted file mode 100644 index 1b13f8dfe..000000000 --- a/content/rancher/v2.x/en/troubleshooting/networking/_index.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Networking -weight: 102 ---- - -The commands/steps listed on this page can be used to check networking related issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. - -1. Save the following file as `overlaytest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: overlaytest - spec: - selector: - matchLabels: - name: overlaytest - template: - metadata: - labels: - name: overlaytest - spec: - tolerations: - - operator: Exists - containers: - - image: rancherlabs/swiss-army-knife - imagePullPolicy: Always - name: overlaytest - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - - ``` - -2. Launch it using `kubectl create -f overlaytest.yml` -3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. -4. Run the following script, from the same location. 
It will have each `overlaytest` container on every host ping each other: - ``` - #!/bin/bash - echo "=> Start network overlay test" - kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read spod shost - do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read tip thost - do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" - RC=$? - if [ $RC -ne 0 ] - then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost - else echo $shost can reach $thost - fi - done - done - echo "=> End network overlay test" - ``` - -5. When this command has finished running, it will output the state of each route: - - ``` - => Start network overlay test - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 - command terminated with exit code 1 - FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 - cp1 can reach cp1 - cp1 can reach wk1 - command terminated with exit code 1 - FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 - wk1 can reach cp1 - wk1 can reach wk1 - => End network overlay test - ``` - If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened for `wk2`. -6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. - - -### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices - -When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: - -* `websocket: bad handshake` -* `Failed to connect to proxy` -* `read tcp: i/o timeout` - -See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. - -### Resolved issues - -#### Overlay network broken when using Canal/Flannel due to missing node annotations - -| | | -|------------|------------| -| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | -| Resolved in | v2.1.2 | - -To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): - -``` -kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' -``` - -If there is no output, the cluster is not affected. 
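If the command does list nodes, you can compare the Flannel annotations of an affected node with those of a healthy node to see which ones are missing. A minimal check; replace `NODE_NAME` with a node name from the output above:

```
kubectl describe node NODE_NAME | grep flannel.alpha.coreos.com
```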
- -#### System namespace pods network connectivity broken - -> **Note:** This applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. - -| | | -|------------|------------| -| GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | - -If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: - -- NGINX ingress controller showing `504 Gateway Time-out` when accessed. -- NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. diff --git a/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md b/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md deleted file mode 100644 index a30b664c9..000000000 --- a/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Rancher HA -weight: 104 ---- - -The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml`). - -### Check Rancher pods - -Rancher pods are deployed as a Deployment in the `cattle-system` namespace. - -Check if the pods are running on all nodes: - -``` -kubectl -n cattle-system get pods -l app=rancher -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -rancher-7dbd7875f7-n6t5t 1/1 Running 0 8m x.x.x.x x.x.x.x -rancher-7dbd7875f7-qbj5k 1/1 Running 0 8m x.x.x.x x.x.x.x -rancher-7dbd7875f7-qw7wb 1/1 Running 0 8m x.x.x.x x.x.x.x -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. - -#### Pod details - -``` -kubectl -n cattle-system describe pods -l app=rancher -``` - -#### Pod container logs - -``` -kubectl -n cattle-system logs -l app=rancher -``` - -#### Namespace events - -``` -kubectl -n cattle-system get events -``` - -### Check ingress - -Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (host address(es) it will be routed to). - -``` -kubectl -n cattle-system get ingress -``` - -Example output: - -``` -NAME HOSTS ADDRESS PORTS AGE -rancher rancher.yourdomain.com x.x.x.x,x.x.x.x,x.x.x.x 80, 443 2m -``` - -### Check ingress controller logs - -When accessing your configured Rancher FQDN does not show you the UI, check the ingress controller logging to see what happens when you try to access Rancher: - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Leader election - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `cattle-controllers` ConfigMap (in this example, `rancher-7dbd7875f7-qbj5k`). 
- -``` -kubectl -n kube-system get configmap cattle-controllers -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"rancher-7dbd7875f7-qbj5k","leaseDurationSeconds":45,"acquireTime":"2019-04-04T11:53:12Z","renewTime":"2019-04-04T12:24:08Z","leaderTransitions":0} -``` - diff --git a/content/rancher/v2.x/en/user-settings/_index.md b/content/rancher/v2.x/en/user-settings/_index.md deleted file mode 100644 index ba1a3bc6d..000000000 --- a/content/rancher/v2.x/en/user-settings/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: User Settings -weight: 23 -aliases: - - /rancher/v2.x/en/tasks/user-settings/ ---- - -Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. - -![User Settings Menu]({{}}/img/rancher/user-settings.png) - -The available user settings are: - -- [API & Keys]({{}}/rancher/v2.x/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. -- [Cloud Credentials]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. -- [Node Templates]({{}}/rancher/v2.x/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). -- [Preferences]({{}}/rancher/v2.x/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. -- Log Out: Ends your user session. diff --git a/content/rancher/v2.x/en/user-settings/api-keys/_index.md b/content/rancher/v2.x/en/user-settings/api-keys/_index.md deleted file mode 100644 index aa83abb72..000000000 --- a/content/rancher/v2.x/en/user-settings/api-keys/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: API Keys -weight: 7005 -aliases: - - /rancher/v2.x/en/concepts/api-keys/ - - /rancher/v2.x/en/tasks/user-settings/api-keys/ ---- - -## API Keys and User Authentication - -If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. - -An API key is also required for using Rancher CLI. - -API Keys are composed of four components: - -- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. -- **Access Key:** The token's username. -- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. -- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. - -## Creating an API Key - -1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. - -2. Click **Add Key**. - -3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. 
- - The API key won't be valid after expiration. Shorter expiration periods are more secure. - - _Available as of v2.4.6_ - Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. - - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. - -4. Click **Create**. - - **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. - - Use the **Bearer Token** to authenticate with Rancher CLI. - -5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. - -## What's Next? - -- Enter your API key information into the application that will send requests to the Rancher API. -- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.x/en/cli). - -## Deleting API Keys - -If you need to revoke an API key, delete it. You should delete API keys: - -- That may have been compromised. -- That have expired. - -To delete an API, select the stale key and click **Delete**. diff --git a/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md b/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md deleted file mode 100644 index 148f8f678..000000000 --- a/content/rancher/v2.x/en/user-settings/cloud-credentials/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Managing Cloud Credentials -weight: 7011 ---- - -_Available as of v2.2.0_ - -When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. - -Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. - -Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers, which are not using them yet. These node drivers will not use cloud credentials. - -You can create cloud credentials in two contexts: - -- [During creation of a node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. -- In the **User Settings** - -All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. - -## Creating a Cloud Credential from User Settings - -1. 
From your user settings, select **User Avatar > Cloud Credentials**. -1. Click **Add Cloud Credential**. -1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop-down. The values of this drop-down are based on the `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) in Rancher. -1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. -1. Click **Create**. - -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). - -## Updating a Cloud Credential - -When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Choose the cloud credential you want to edit and click the **⋮ > Edit**. -1. Update the credential information and click **Save**. - -**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). - -## Deleting a Cloud Credential - -In order to delete a cloud credential, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.x/en/user-settings/node-templates/#deleting-a-node-template) that are still associated with that cloud credential. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. You can either individually delete a cloud credential or bulk delete. - - - To individually delete one, choose the cloud credential you want to delete and click the **⋮ > Delete**. - - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. -1. Confirm that you want to delete these cloud credentials. diff --git a/content/rancher/v2.x/en/user-settings/node-templates/_index.md b/content/rancher/v2.x/en/user-settings/node-templates/_index.md deleted file mode 100644 index 0b6f411fc..000000000 --- a/content/rancher/v2.x/en/user-settings/node-templates/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Managing Node Templates -weight: 7010 ---- - -When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: - -- While [provisioning a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). -- At any time, from your [user settings](#creating-a-node-template-from-user-settings). - -When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer use from your user settings. - -## Creating a Node Template from User Settings - -1. From your user settings, select **User Avatar > Node Templates**. -1. Click **Add Template**. -1. Select one of the cloud providers available. 
Then follow the instructions on screen to configure the template. - -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). - -## Updating a Node Template - -1. From your user settings, select **User Avatar > Node Templates**. -1. Choose the node template that you want to edit and click the **⋮ > Edit**. - - > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -1. Edit the required information and click **Save**. - -**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. - -## Cloning Node Templates - -When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Find the template you want to clone. Then select **⋮ > Clone**. -1. Complete the rest of the form. - -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools). - -## Deleting a Node Template - -When you no longer use a node template, you can delete it from your user settings. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.x/en/user-settings/preferences/_index.md b/content/rancher/v2.x/en/user-settings/preferences/_index.md deleted file mode 100644 index fc2fe8c1f..000000000 --- a/content/rancher/v2.x/en/user-settings/preferences/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: User Preferences -weight: 7012 ---- - -Each user can choose preferences to personalize their Rancher experience. To change preference settings, open the **User Settings** menu and then select **Preferences**. - -## Theme - -Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. - -## My Account - -This section displays the **Name** (your display name) and **Username** (your login) used for your session. To change your login's current password, click the **Change Password** button. - -## Table Row per Page - -On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. 
diff --git a/content/rancher/v2.x/en/v1.6-migration/_index.md b/content/rancher/v2.x/en/v1.6-migration/_index.md deleted file mode 100644 index 27bb78e7b..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Migrating from v1.6 to v2.x -weight: 28 ---- - -Rancher v2.x has been rearchitected and rewritten with the goal of providing a complete management solution for Kubernetes and Docker. Due to these extensive changes, there is no direct upgrade path from v1.6 to v2.x, but rather a migration of your v1.6 services into v2.x as Kubernetes workloads. In v1.6, the most common orchestration used was Rancher's own engine called Cattle. The following guide explains and educates our Cattle users on running workloads in a Kubernetes environment. - -## Video - -This video demonstrates a complete walk through of migration from Rancher v1.6 to v2.x. - -{{< youtube OIifcqj5Srw >}} - -## Migration Plan - ->**Want to more about Kubernetes before getting started?** Read our [Kubernetes Introduction]({{}}/rancher/v2.x/en/v1.6-migration/kub-intro). - - -- [1. Get Started]({{}}/rancher/v2.x/en/v1.6-migration/get-started) - - >**Already a Kubernetes user in v1.6?** - > - > _Get Started_ is the only section you need to review for migration to v2.x. You can skip everything else. -- [2. Migrate Your Services]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) -- [3. Expose Your Services]({{}}/rancher/v2.x/en/v1.6-migration/expose-services/) -- [4. Configure Health Checks]({{}}/rancher/v2.x/en/v1.6-migration/monitor-apps) -- [5. Schedule Your Services]({{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) -- [6. Service Discovery]({{}}/rancher/v2.x/en/v1.6-migration/discover-services/) -- [7. Load Balancing]({{}}/rancher/v2.x/en/v1.6-migration/load-balancing/) - - -## Migration Example Files - -Throughout this migration guide, we will reference several example services from Rancher v1.6 that we're migrating to v2.x. These services are: - -- A service named `web`, which runs [Let's Chat](http://sdelements.github.io/lets-chat/), a self-hosted chat for small teams. -- A service named `database`, which runs [Mongo DB](https://www.mongodb.com/), an open source document database. -- A service named `webLB`, which runs [HAProxy](http://www.haproxy.org/), an open source load balancer used in Rancher v1.6. - -During migration, we'll export these services from Rancher v1.6. The export generates a unique directory for each Rancher v1.6 environment and stack, and two files are output into each stack's directory: - -- `docker-compose.yml` - - A file that contains standard Docker directives for each service in your stack. We'll be converting these files to Kubernetes manifests that can be read by Rancher v2.x. - -- `rancher-compose.yml` - - A file for Rancher-specific functionality such as health checks and load balancers. These files cannot be read by Rancher v2.x, so don't worry about their contents—we're discarding them and recreating them using the v2.x UI. - - -### [Next: Get Started]({{}}/rancher/v2.x/en/v1.6-migration/get-started) diff --git a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md deleted file mode 100644 index 74147a826..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "6. 
Service Discovery" -weight: 600 ---- - -Service discovery is one of the core functionalities of any container-based environment. Once you have packaged and launched your application, the next step is making it discoverable to other containers in your environment or the external world. This document will describe how to use the service discovery support provided by Rancher v2.x so that you can find them by name. - -This document will also show you how to link the workloads and services that you migrated into Rancher v2.x. When you parsed your services from v1.6 using migration-tools CLI, it output two files for each service: one deployment manifest and one service manifest. You'll have to link these two files together before the deployment works correctly in v2.x. - -
Resolve the output.txt Link Directive
- -![Resolve Link Directive]({{}}/img/rancher/resolve-links.png) - -## In This Document - - - -- [Service Discovery: Rancher v1.6 vs. v2.x](#service-discovery-rancher-v1-6-vs-v2-x) -- [Service Discovery Within and Across Namespaces](#service-discovery-within-and-across-namespaces) -- [Container Discovery](#container-discovery) -- [Service Name Alias Creation](#service-name-alias-creation) - - - -## Service Discovery: Rancher v1.6 vs. v2.x - -For Rancher v2.x, we've replaced the Rancher DNS microservice used in v1.6 with native [Kubernetes DNS support](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/), which provides equivalent service discovery for Kubernetes workloads and pods. Former Cattle users can replicate all the service discovery features from Rancher v1.6 in v2.x. There's no loss of functionality. - -Kubernetes schedules a DNS pod and service in the cluster, which is similar to the [Rancher v1.6 DNS microservice]({{}}/rancher/v1.6/en/cattle/internal-dns-service/#internal-dns-service-in-cattle-environments). Kubernetes then configures its kubelets to route all DNS lookups to this DNS service, which is skyDNS, a flavor of the default Kube-DNS implementation. - -The following table displays each service discovery feature available in the two Rancher releases. - -Service Discovery Feature | Rancher v1.6 | Rancher v2.x | Description ---------------------------|--------------|--------------|------------- -[service discovery within and across stacks][1] (i.e., namespaces in v2.x) | ✓ | ✓ | All services in the stack are resolvable by `<service_name>` and by `<service_name>.<stack_name>` across stacks. -[container discovery][2] | ✓ | ✓ | All containers are resolvable globally by their name. -[service alias name creation][3] | ✓ | ✓ | Adding an alias name to services and linking to other services using aliases. -[discovery of external services][4] | ✓ | ✓ | Pointing to services deployed outside of Rancher using the external IP(s) or a domain name. - -[1]: #service-discovery-within-and-across-namespaces -[2]: #container-discovery -[3]: #service-name-alias-creation -[4]: #service-name-alias-creation - -
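To confirm that name-based discovery works in a v2.x cluster, one quick check is to resolve a service by name from a throwaway pod, along these lines. The `busybox:1.28` image is used here because the `nslookup` applet in some newer busybox tags gives unreliable results, and `kubernetes.default` can be replaced with any `<service>.<namespace>` in your cluster:

```
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default
```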
- -### Service Discovery Within and Across Namespaces - - -When you create a _new_ workload in v2.x (not migrated, more on that [below](#linking-migrated-workloads-and-services)), Rancher automatically creates a service with an identical name, and then links the service and workload together. If you don't explicitly expose a port, the default port of `42` is used. This practice makes the workload discoverable within and across namespaces by its name. - -### Container Discovery - -Individual pods running in the Kubernetes cluster also get a DNS record assigned, which uses dot notation as well: `<pod-ip-with-dashes>.<namespace>.pod.cluster.local`. For example, a pod with an IP of `10.42.2.7` in the namespace `default` with a DNS name of `cluster.local` would have an entry of `10-42-2-7.default.pod.cluster.local`. - -Pods can also be resolved using the `hostname` and `subdomain` fields if set in the pod spec. Details about this resolution are covered in the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). - -### Linking Migrated Workloads and Services - -When you migrate v1.6 services to v2.x, Rancher does not automatically create a Kubernetes service record for each migrated deployment. Instead, you'll have to link the deployment and service together manually, using any of the methods listed below. - -In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are linked together. - -
Linked Workload and Kubernetes Service
- -![Linked Workload and Kubernetes Service]({{}}/img/rancher/linked-service-workload.png) - - -### Service Name Alias Creation - -Just as you can create an alias for Rancher v1.6 services, you can do the same for Rancher v2.x workloads. Similarly, you can also create DNS records pointing to services running externally, using either their hostname or IP address. These DNS records are Kubernetes service objects. - -Using the v2.x UI, use the context menu to navigate to the `Project` view. Then click **Resources > Workloads > Service Discovery.** (In versions before v2.3.0, click the **Workloads > Service Discovery** tab.) All existing DNS records created for your workloads are listed under each namespace. - -Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods. - -
Add Service Discovery Record
-![Add Service Discovery Record]({{}}/img/rancher/add-record.png) - -The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. - -Option | Kubernetes-implemented? | Rancher-implemented? --------|-------------------------|--------------------- -Pointing to an external hostname | ✓ | | -Pointing to a set of pods that match a selector | ✓ | | -Pointing to an external IP address | | ✓ -Pointing to another workload | | ✓ -Create alias for another DNS record | | ✓ - - -### [Next: Load Balancing]({{}}/rancher/v2.x/en/v1.6-migration/load-balancing/) diff --git a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md deleted file mode 100644 index 5e7207b16..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "3. Expose Your Services" -weight: 400 ---- - -In testing environments, you usually need to route external traffic to your cluster containers by using an unadvertised IP and port number, providing users access to their apps. You can accomplish this goal using port mapping, which exposes a workload (i.e., service) publicly over a specific port, provided you know your node IP address(es). You can either map a port using HostPorts (which exposes a service on a specified port on a single node) or NodePorts (which exposes a service on _all_ nodes on a single port). - -Use this document to correct workloads that list `ports` in `output.txt`. You can correct it by either setting a HostPort or a NodePort. - -
Resolve ports for the web Workload
- -![Resolve Ports]({{}}/img/rancher/resolve-ports.png) - - -## In This Document - - - -- [What's Different About Exposing Services in Rancher v2.x?](#what-s-different-about-exposing-services-in-rancher-v2-x) -- [HostPorts](#hostport) -- [Setting HostPort](#setting-hostport) -- [NodePorts](#nodeport) -- [Setting NodePort](#setting-nodeport) - - - -## What's Different About Exposing Services in Rancher v2.x? - -In Rancher v1.6, we used the term _Port Mapping_ for exposing an IP address and port where you and your users can access a service. - -In Rancher v2.x, the mechanisms and terms for service exposure have changed and expanded. You now have two port mapping options: _HostPorts_ (which is most similar to v1.6 port mapping and allows you to expose your app at a single IP and port) and _NodePorts_ (which allows you to map ports on _all_ of your cluster nodes, not just one). - -Unfortunately, port mapping cannot be parsed by the migration-tools CLI. If the services you're migrating from v1.6 to v2.x have port mappings set, you'll have to either set a [HostPort](#hostport) or [NodePort](#nodeport) as a replacement. - -## HostPort - -A _HostPort_ is a port exposed to the public on a _specific node_ running one or more pods. Traffic to the node and the exposed port (`<host IP>:<HostPort>`) is routed to the requested container's private port. Using a HostPort for a Kubernetes pod in Rancher v2.x is synonymous with creating a public port mapping for a container in Rancher v1.6. - -In the following diagram, a user is trying to access an instance of Nginx, which is running within a pod on port 80. However, the Nginx deployment is assigned a HostPort of 9890. The user can connect to this pod by browsing to its host IP address, followed by the HostPort in use (9890 in this case). - -![HostPort Diagram]({{}}/img/rancher/hostPort.svg) - - -#### HostPort Pros - -- Any port available on the host can be exposed. -- Configuration is simple, and the HostPort is set directly in the Kubernetes pod specifications. Unlike NodePort, no other objects need to be created to expose your app. - -#### HostPort Cons - -- Limits the scheduling options for your pod, as only hosts with vacancies for your chosen port can be used. -- If the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. -- Any two workloads that specify the same HostPort cannot be deployed to the same node. -- If the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods: Kubernetes reschedules them to a different node. - -## Setting HostPort - -You can set a HostPort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a HostPort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to the HostPort exposed on your target node. - -For example, for the `web-deployment.yml` file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, publish the port that the container uses, and then declare a HostPort listening on the port of your choice (`9890`) as shown below. You can then access your workload by clicking the link created in the Rancher UI. - -
Port Mapping: Setting HostPort
- -{{< img "/img/rancher/set-hostport.gif" "Set HostPort">}} - -## NodePort - -A _NodePort_ is a port that's open to the public _on each_ of your cluster nodes. When the NodePort receives a request for any of the cluster hosts' IP address for the set NodePort value, NodePort (which is a Kubernetes service) routes traffic to a specific pod, regardless of what node it's running on. NodePort provides a static endpoint where external requests can reliably reach your pods. - -NodePorts help you circumvent an IP address shortcoming. Although pods can be reached by their IP addresses, they are disposable by nature. Pods are routinely destroyed and recreated, getting a new IP address with each replication. Therefore, IP addresses are not a reliable way to access your pods. NodePorts help you around this issue by providing a static service where they can always be reached. Even if your pods change their IP addresses, external clients dependent on them can continue accessing them without disruption, all without any knowledge of the pod re-creation occurring on the back end. - -In the following diagram, a user is trying to connect to an instance of Nginx running in a Kubernetes cluster managed by Rancher. Although he knows what NodePort Nginx is operating on (30216 in this case), he does not know the IP address of the specific node that the pod is running on. However, with NodePort enabled, he can connect to the pod using the IP address for _any_ node in the cluster. Kubeproxy will forward the request to the correct node and pod. - -![NodePort Diagram]({{}}/img/rancher/nodePort.svg) - -NodePorts are available within your Kubernetes cluster on an internal IP. If you want to expose pods external to the cluster, use NodePorts in conjunction with an external load balancer. Traffic requests from outside your cluster for `:` are directed to the workload. The `` can be the IP address of any node in your Kubernetes cluster. - -#### NodePort Pros - -- Creating a NodePort service provides a static public endpoint to your workload pods. There, even if the pods are destroyed, Kubernetes can deploy the workload anywhere in the cluster without altering the public endpoint. -- The scale of the pods is not limited by the number of nodes in the cluster. NodePort allows decoupling of public access from the number and location of pods. - -#### NodePort Cons - -- When a NodePort is used, that `:` is reserved in your Kubernetes cluster on all nodes, even if the workload is never deployed to the other nodes. -- You can only specify a port from a configurable range (by default, it is `30000-32767`). -- An extra Kubernetes object (a Kubernetes service of type NodePort) is needed to expose your workload. Thus, finding out how your application is exposed is not straightforward. - -## Setting NodePort - -You can set a NodePort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a NodePort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to a NodePort, which you'll be able to access from each cluster node. - -For example, for the `web-deployment.yml` file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, set the publish the port that the container uses, and then declare a NodePort. You can then access your workload by clicking the link created in the Rancher UI. 
-
->**Note:**
->
->- If you set a NodePort without giving it a value, Rancher chooses a port at random from the following range: `30000-32767`.
->- If you manually set a NodePort, you must assign it a value within the `30000-32767` range.
-
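When you set a NodePort in the UI, Rancher creates a Kubernetes Service of type NodePort behind the scenes. A rough sketch of such a service is shown below; the service name, the selector, and the `30216` node port (borrowed from the earlier diagram) are illustrative assumptions:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web-nodeport
spec:
  type: NodePort
  selector:
    app: web           # must match the labels on the workload's pods
  ports:
    - port: 80         # port exposed on the cluster-internal service IP
      targetPort: 80   # port the container listens on
      nodePort: 30216  # port opened on every node (30000-32767 by default)
```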
Port Mapping: Setting NodePort
- -{{< img "/img/rancher/set-nodeport.gif" "Set NodePort" >}} - -### [Next: Configure Health Checks]({{}}/rancher/v2.x/en/v1.6-migration/monitor-apps) diff --git a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md deleted file mode 100644 index 7293a2c2a..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "1. Get Started" -weight: 25 ---- -Get started with your migration to Rancher v2.x by installing Rancher and configuring your new Rancher environment. - -## Outline - - - -- [A. Install Rancher v2.x](#a-install-rancher-v2-x) -- [B. Configure Authentication](#b-configure-authentication) -- [C. Provision a Cluster and Project](#c-provision-a-cluster-and-project) -- [D. Create Stacks](#d-create-stacks) - - - - -## A. Install Rancher v2.x - -The first step in migrating from v1.6 to v2.x is to install the Rancher v2.x Server side-by-side with your v1.6 Server, as you'll need your old install during the migration process. Due to the architecture changes between v1.6 and v2.x, there is no direct path for upgrade. You'll have to install v2.x independently and then migrate your v1.6 services to v2.x. - -New for v2.x, all communication to Rancher Server is encrypted. The procedures below instruct you not only on installation of Rancher, but also creation and installation of these certificates. - -Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements]({{}}/rancher/v2.x/en/installation/requirements/). - -After provisioning your node(s), install Rancher: - -- [Docker Install]({{}}/rancher/v2.x/en/installation/single-node) - - For development environments, Rancher can be installed on a single node using Docker. This installation procedure deploys a single Rancher container to your host. - -- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) - - For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. - - >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{}}/rancher/v2.x/en/installation/install-rancher-on-k8s/) for full requirements. - -## B. Configure Authentication - -After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{}}/rancher/v2.x/en/admin-settings/authentication). - -
Rancher v2.x Authentication
- -![Rancher v2.x Authentication]({{}}/img/rancher/auth-providers.svg) - -### Local Users - -Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{}}/rancher/v2.x/en/admin-settings/authentication/) and assign them access rights. - -As a best practice, you should use a hybrid of external _and_ local authentication. This practice provides access to Rancher should your external authentication experience an interruption, as you can still log in using a local user account. Set up a few local accounts as administrative users of Rancher. - - -### SAML Authentication Providers - -In Rancher v1.6, we encouraged our SAML users to use Shibboleth, as it was the only SAML authentication option we offered. However, to better support their minor differences, we've added more fully tested SAML providers for v2.x: Ping Identity, Microsoft ADFS, and FreeIPA. - -## C. Provision a Cluster and Project - -Begin work in Rancher v2.x by using it to provision a new Kubernetes cluster, which is similar to an environment in v1.6. This cluster will host your application deployments. - -A cluster and project in combined together in Rancher v2.x is equivalent to a v1.6 environment. A _cluster_ is the compute boundary (i.e., your hosts) and a _project_ is an administrative boundary (i.e., a grouping of namespaces used to assign access rights to users). - -There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). - -### Clusters - -In Rancher v1.6, compute nodes were added to an _environment_. Rancher v2.x eschews the term _environment_ for _cluster_, as Kubernetes uses this term for a team of computers instead of _environment_. - -Rancher v2.x lets you launch a Kubernetes cluster anywhere. Host your cluster using: - -- A [hosted Kubernetes provider]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/). -- A [pool of nodes from an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). Rancher launches Kubernetes on the nodes. -- Any [custom node(s)]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. - -### Projects - -Additionally, Rancher v2.x introduces [projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. - -When you create a cluster, two projects are automatically created: - -- The `System` project, which includes system namespaces where important Kubernetes resources are running (like ingress controllers and cluster dns services) -- The `Default` project. 
- -However, for production environments, we recommend [creating your own project]({{}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) and giving it a descriptive name. - -After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. - -## D. Create Stacks - -In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. In v2.x, you need to [create namespaces]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/), which are the v2.x equivalent of stacks, for the same purpose. - -In Rancher v2.x, namespaces are child objects to projects. When you create a project, a `default` namespace is added to the project, but you can create your own to parallel your stacks from v1.6. - -During migration, if you don't explicitly define which namespace a service should be deployed to, it's deployed to the `default` namespace. - -Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery]({{}}/rancher/v2.x/en/v1.6-migration/discover-services) soon). - - -### [Next: Migrate Your Services]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool) diff --git a/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md b/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md deleted file mode 100644 index a29115d4d..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/kub-intro/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Kubernetes Introduction -weight: 1 ---- - -Rancher v2.x is built on the [Kubernetes](https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational) container orchestrator. This shift in underlying technology for v2.x is a large departure from v1.6, which supported several popular container orchestrators. Since Rancher is now based entirely on Kubernetes, it's helpful to learn the Kubernetes basics. - -The following table introduces and defines some key Kubernetes concepts. - -| **Concept** | **Definition** | -| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Cluster | A collection of machines that run containerized applications managed by Kubernetes. | -| Namespace | A virtual cluster, multiple of which can be supported by a single physical cluster. | -| Node | One of the physical or virtual machines that make up a cluster. | -| Pod | The smallest and simplest Kubernetes object. A pod represents a set of running [containers](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/#why-containers) on your cluster. | -| Deployment | An API object that manages a replicated application. | -| Workload | Workloads are objects that set deployment rules for pods. | - - -## Migration Cheatsheet - -Because Rancher v1.6 defaulted to our Cattle container orchestrator, it primarily used terminology related to Cattle. However, because Rancher v2.x uses Kubernetes, it aligns with the Kubernetes naming standard. 
This shift could be confusing for people unfamiliar with Kubernetes, so we've created a table that maps terms commonly used in Rancher v1.6 to their equivalents in Rancher v2.x.
-
-| **Rancher v1.6** | **Rancher v2.x** |
-| --- | --- |
-| Container | Pod |
-| Services | Workload |
-| Load Balancer | Ingress |
-| Stack | Namespace |
-| Environment | Project (Administration)/Cluster (Compute) |
-| Host | Node |
-| Catalog | Helm |
-| Port Mapping | HostPort (Single Node)/NodePort (All Nodes) |
-
-More detailed information on Kubernetes concepts can be found in the -[Kubernetes Concepts Documentation](https://kubernetes.io/docs/concepts/). - -### [Next: Get Started]({{}}/rancher/v2.x/en/v1.6-migration/get-started/) diff --git a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md deleted file mode 100644 index b25e5709e..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: "7. Load Balancing" -weight: 700 ---- - -If your applications are public-facing and consume significant traffic, you should place a load balancer in front of your cluster so that users can always access their apps without service interruption. Typically, you can fulfill a high volume of service requests by [horizontally scaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) your deployment, which spins up additional application containers as traffic ramps up. However, this technique requires routing that distributes traffic across your nodes efficiently. In cases where you need to accommodate public traffic that scales up and down, you'll need a load balancer. - -As outlined in [its documentation]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/), Rancher v1.6 provided rich support for load balancing using its own microservice powered by HAProxy, which supports HTTP, HTTPS, TCP hostname, and path-based routing. Most of these same features are available in v2.x. However, load balancers that you used with v1.6 cannot be migrated to v2.x. You'll have to manually recreate your v1.6 load balancer in v2.x. - -If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x. - -
output.txt Load Balancer Directive
- -![Resolve Load Balancer Directive]({{}}/img/rancher/resolve-load-balancer.png) - -## In This Document - - - -- [Load Balancing Protocol Options](#load-balancing-protocol-options) -- [Load Balancer Deployment](#load-balancer-deployment) -- [Load Balancing Architecture](#load-balancing-architecture) -- [Ingress Caveats](#ingress-caveats) -- [Deploying Ingress](#deploying-ingress) -- [Rancher v2.x Load Balancing Limitations](#rancher-v2-x-load-balancing-limitations) - - - -## Load Balancing Protocol Options - -By default, Rancher v2.x replaces the v1.6 load balancer microservice with the native [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), which is backed by NGINX Ingress Controller for layer 7 load balancing. By default, Kubernetes Ingress only supports the HTTP and HTTPS protocols, not TCP. Load balancing is limited to these two protocols when using Ingress. - -> **TCP Required?** See [TCP Load Balancing Options](#tcp-load-balancing-options) - - -## Load Balancer Deployment - -In Rancher v1.6, you could add port/service rules for configuring your HA proxy to load balance for target services. You could also configure the hostname/path-based routing rules. - -Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. - -By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only. - -RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. - -For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -## Load Balancing Architecture - -Deployment of Ingress Controller in v2.x as a DaemonSet brings some architectural changes that v1.6 users should know about. - -In Rancher v1.6 you could deploy a scalable load balancer service within your stack. If you had four hosts in your Cattle environment, you could deploy one load balancer service with a scale of two and point to your application by appending port 80 to your two host IP Addresses. You could also launch another load balancer on the remaining two hosts to balance a different service again using port 80 because your load balancer is using different host IP Addresses). - - - -
Rancher v1.6 Load Balancing Architecture
-
-![Rancher v1.6 Load Balancing]({{}}/img/rancher/cattle-load-balancer.svg)
-
-Because the Rancher v2.x Ingress Controller runs as a DaemonSet, it is deployed globally on all schedulable nodes and serves your entire Kubernetes cluster. Therefore, when you program Ingress rules, you must use a unique hostname and path to point to each workload, because the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads.
-
Rancher v2.x Load Balancing Architecture
- -![Rancher v2.x Load Balancing]({{}}/img/rancher/kubernetes-load-balancer.svg) - -## Ingress Caveats - -Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balancing, you must use unique host names and paths when configuring your workloads. This limitation derives from: - -- Ingress confinement to ports 80 and 443 (i.e, the ports HTTP[S] uses for routing). -- The load balancer and the Ingress Controller is launched globally for the cluster as a DaemonSet. - -> **TCP Required?** Rancher v2.x still supports TCP. See [TCP Load Balancing Options](#tcp-load-balancing-options) for workarounds. - -## Deploying Ingress - -You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Deploy**. During deployment, you can choose a target project or namespace. - ->**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. -> - -![Workload Scale]({{}}/img/rancher/workload-scale.png) - -For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. - -
Browsing to Load Balancer Tab and Adding Ingress
- -![Adding Ingress]({{}}/img/rancher/add-ingress.gif) - -Similar to a service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. - -### Configuring Host- and Path-Based Routing - -Using Rancher v2.x, you can add Ingress rules that are based on host names or a URL path. Based on the rules you create, your NGINX Ingress Controller routes traffic to multiple target workloads or Kubernetes services. - -For example, let's say you have multiple workloads deployed to a single namespace. You can add an Ingress to route traffic to these two workloads using the same hostname but different paths, as depicted in the image below. URL requests to `foo.com/name.html` will direct users to the `web` workload, and URL requests to `foo.com/login` will direct users to the `chat` workload. - -
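In manifest form, the rule described above corresponds roughly to the Ingress below. This is a sketch rather than output from the migration tools; it assumes the `web` and `chat` workloads are backed by services of the same names on port 80, and it uses the `networking.k8s.io/v1` API (older clusters used `extensions/v1beta1`):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: foo-ingress
spec:
  rules:
    - host: foo.com
      http:
        paths:
          - path: /name.html
            pathType: Prefix
            backend:
              service:
                name: web    # routes foo.com/name.html to the web workload
                port:
                  number: 80
          - path: /login
            pathType: Prefix
            backend:
              service:
                name: chat   # routes foo.com/login to the chat workload
                port:
                  number: 80
```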
Ingress: Path-Based Routing Configuration
- -![Ingress: Path-Based Routing Configuration]({{}}/img/rancher/add-ingress-form.png) - -Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address. - -
Workload Links
- -![Load Balancer Links to Workloads]({{}}/img/rancher/load-balancer-links.png) - -The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: - -``` -kubectl get ingress -``` - -### HTTPS/Certificates Option - -Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want to use it, you need to use a valid SSL/TLS certificate. While configuring Ingress rules, use the **SSL/TLS Certificates** section to configure a certificate. - -- We recommend [uploading a certificate]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. -- If you have configured [NGINX default certificate]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. - -
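In manifest form, attaching a certificate adds a `tls` section to the Ingress. The sketch below is illustrative only; it assumes the uploaded certificate is stored in a secret named `foo-com-cert` and reuses the `foo.com` host and `web` service from the earlier example:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: foo-ingress-tls
spec:
  tls:
    - hosts:
        - foo.com
      secretName: foo-com-cert   # secret that holds the certificate and key
  rules:
    - host: foo.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web
                port:
                  number: 80
```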
Load Balancer Configuration: SSL/TLS Certificate Section
- -![SSL/TLS Certificates Section]({{}}/img/rancher/load-balancer-ssl-certs.png) - -### TCP Load Balancing Options - -#### Layer-4 Load Balancer - -For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. - -For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`. - -
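The load balancer service that Rancher creates in this case is a standard Kubernetes Service of type `LoadBalancer`, roughly like the sketch below. The selector, ports, and labels are assumptions for the `myapp` example, not the exact object Rancher generates:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp-loadbalancer
spec:
  type: LoadBalancer
  selector:
    app: myapp        # must match the pod labels of the myapp deployment
  ports:
    - port: 80        # port exposed by the cloud load balancer appliance
      targetPort: 80  # container port on the myapp pods
```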
Workload Deployment: Layer 4 Load Balancer Creation
- -![Deploy Layer-4 Load Balancer]({{}}/img/rancher/deploy-workload-load-balancer.png) - -Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. - -#### NGINX Ingress Controller TCP Support by ConfigMaps - -Although NGINX supports TCP, Kubernetes Ingress itself does not support the TCP protocol. Therefore, out-of-the-box configuration of NGINX Ingress Controller for TCP balancing isn't possible. - -However, there is a workaround to use NGINX's TCP balancing by creating a Kubernetes ConfigMap, as described in the [Ingress GitHub readme](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md). You can create a ConfigMap object that stores pod configuration parameters as key-value pairs, separate from the pod image, as described in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). - -To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. - -![Layer-4 Load Balancer: ConfigMap Workaround]({{}}/img/rancher/layer-4-lb-config-map.png) - -The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. - -## Rancher v2.x Load Balancing Limitations - -Cattle provided feature-rich load balancer support that is [well documented]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/#load-balancers). Some of these features do not have equivalents in Rancher v2.x. This is the list of such features: - -- No support for SNI in current NGINX Ingress Controller. -- TCP load balancing requires a load balancer appliance enabled by cloud provider within the cluster. There is no Ingress support for TCP on Kubernetes. -- Only ports 80 and 443 can be configured for HTTP/HTTPS routing via Ingress. Also Ingress Controller is deployed globally as a DaemonSet and not launched as a scalable service. Also, users cannot assign random external ports to be used for balancing. Therefore, users need to ensure that they configure unique hostname/path combinations to avoid routing conflicts using the same two ports. -- There is no way to specify port rule priority and ordering. -- Rancher v1.6 added support for draining backend connections and specifying a drain timeout. This is not supported in Rancher v2.x. -- There is no support for specifying a custom stickiness policy and a custom load balancer config to be appended to the default config as of now in Rancher v2.x. There is some support, however, available in native Kubernetes for customizing the NGINX configuration as noted in the [NGINX Ingress Controller Custom Configuration Documentation](https://kubernetes.github.io/ingress-nginx/examples/customization/custom-configuration/). - -### Finished! 
diff --git a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md deleted file mode 100644 index b1a2f1cc1..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: "4. Configure Health Checks" -weight: 400 ---- - -Rancher v1.6 provided TCP and HTTP health checks on your nodes and services using its own health check microservice. These health checks monitored your containers to confirm they're operating as intended. If a container failed a health check, Rancher would destroy the unhealthy container and then replicates a healthy one to replace it. - -For Rancher v2.x, we've replaced the health check microservice, leveraging instead Kubernetes' native health check support. - -Use this document to correct Rancher v2.x workloads and services that list `health_check` in `output.txt`. You can correct them by configuring a liveness probe (i.e., a health check). - -For example, for the image below, we would configure liveness probes for the `web` and `weblb` workloads (i.e., the Kubernetes manifests output by migration-tools CLI). - -
Resolve health_check for the web and webLB Workloads
- -![Resolve health_check]({{}}/img/rancher/resolve-health-checks.png) - -## In This Document - - - -- [Rancher v1.6 Health Checks](#rancher-v1-6-health-checks) -- [Rancher v2.x Health Checks](#rancher-v2-x-health-checks) -- [Configuring Probes in Rancher v2.x](#configuring-probes-in-rancher-v2-x) - - - -## Rancher v1.6 Health Checks - -In Rancher v1.6, you could add health checks to monitor a particular service's operations. These checks were performed by the Rancher health check microservice, which is launched in a container on a node separate from the node hosting the monitored service (however, Rancher v1.6.20 and later also runs a local health check container as a redundancy for the primary health check container on another node). Health check settings were stored in the `rancher-compose.yml` file for your stack. - -The health check microservice features two types of health checks, which have a variety of options for timeout, check interval, etc.: - -- **TCP health checks**: - - These health checks check if a TCP connection opens at the specified port for the monitored service. For full details, see the [Rancher v1.6 documentation]({{}}/rancher/v1.6/en/cattle/health-checks/). - -- **HTTP health checks**: - - These health checks monitor HTTP requests to a specified path and check whether the response is expected response (which is configured along with the health check). - -The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. - -![Rancher v1.6 Health Checks]({{}}/img/rancher/healthcheck.svg) - -## Rancher v2.x Health Checks - -In Rancher v2.x, the health check microservice is replaced with Kubernetes's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. - -However, probes in Rancher v2.x have some important differences, which are described below. For full details about probes, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes). - - -### Local Health Checks - -Unlike the Rancher v1.6 health checks performed across hosts, probes in Rancher v2.x occur on _same_ host, performed by the kubelet. - - -### Multiple Probe Types - -Kubernetes includes two different _types_ of probes: liveness checks and readiness checks. - -- **Liveness Check**: - - Checks if the monitored container is running. If the probe reports failure, Kubernetes kills the pod, and then restarts it according to the deployment [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). - -- **Readiness Check**: - - Checks if the container is ready to accept and serve requests. If the probe reports failure, the pod is sequestered from the public until it self heals. - -The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one of the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. 
-
-![Rancher v2.x Probes]({{}}/img/rancher/probes.svg)
-
-## Configuring Probes in Rancher v2.x
-
-The [migration-tools CLI]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files into Kubernetes manifests. Therefore, if you want to add health checks to your Rancher v2.x workloads, you'll have to add them manually.
-
-Using the Rancher v2.x UI, you can add TCP or HTTP health checks to Kubernetes workloads. By default, Rancher asks you to configure a readiness check for your workloads and applies a liveness check using the same configuration. Optionally, you can define a separate liveness check.
-
-If the probe fails, the container is restarted per the `restartPolicy` defined in the workload specs. This setting is equivalent to the `strategy` parameter for health checks in Rancher v1.6.
-
-Configure probes by using the **Health Check** section while editing deployments called out in `output.txt`.
-
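For reference, the values you enter in the **Health Check** section end up as `readinessProbe` and `livenessProbe` entries on the container in the deployment's pod template. The fragment below is a sketch only; the port, path, and timing values are assumptions, showing a TCP readiness check plus a separately defined HTTP liveness check:

```yaml
spec:
  containers:
    - name: web
      image: nginx
      readinessProbe:            # generated from the Health Check form
        tcpSocket:
          port: 80
        initialDelaySeconds: 10
        periodSeconds: 5
      livenessProbe:             # only differs if you define a separate liveness check
        httpGet:
          path: /healthz
          port: 80
        timeoutSeconds: 5
        failureThreshold: 3
```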
Edit Deployment: Health Check Section
- -![Health Check Section]({{}}/img/rancher/health-check-section.png) - -### Configuring Checks - -While you create a workload using Rancher v2.x, we recommend configuring a check that monitors the health of the deployment's pods. - -{{% tabs %}} - -{{% tab "TCP Check" %}} - -TCP checks monitor your deployment's health by attempting to open a connection to the pod over a specified port. If the probe can open the port, it's considered healthy. Failure to open it is considered unhealthy, which notifies Kubernetes that it should kill the pod and then replace it according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). - -You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). - -![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) - -When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. - - - -{{% /tab %}} - -{{% tab "HTTP Check" %}} - -HTTP checks monitor your deployment's health by sending an HTTP GET request to a specific URL path that you define. If the pod responds with a message range of `200`-`400`, the health check is considered successful. If the pod replies with any other value, the check is considered unsuccessful, so Kubernetes kills and replaces the pod according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). - -You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). - -![HTTP Check]({{}}/img/rancher/readiness-check-http.png) - -When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. - -{{% /tab %}} - -{{% /tabs %}} - -### Configuring Separate Liveness Checks - -While configuring a readiness check for either the TCP or HTTP protocol, you can configure a separate liveness check by clicking the **Define a separate liveness check**. For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). - -![Separate Liveness Check]({{}}/img/rancher/separate-check.png) - -### Additional Probing Options - -Rancher v2.x, like v1.6, lets you perform health checks using the TCP and HTTP protocols. However, Rancher v2.x also lets you check the health of a pod by running a command inside of it. 
If the container exits with a code of `0` after running the command, the pod is considered healthy. - -You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). - -![Healthcheck Execute Command]({{}}/img/rancher/healthcheck-cmd-exec.png) - -#### Health Check Parameter Mappings - -While configuring readiness checks and liveness checks, Rancher prompts you to fill in various timeout and threshold values that determine whether the probe is a success or failure. The reference table below shows you the equivalent health check values from Rancher v1.6. - -Rancher v1.6 Compose Parameter | Rancher v2.x Kubernetes Parameter --------------------------------|----------------------------------- -`port` | `tcpSocket.port` -`response_timeout` | `timeoutSeconds` -`healthy_threshold` | `failureThreshold` -`unhealthy_threshold` | `successThreshold` -`interval` | `periodSeconds` -`initializing_timeout` | `initialDelaySeconds` -`strategy` | `restartPolicy` - -### [Next: Schedule Your Services]({{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/) diff --git a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md b/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md deleted file mode 100644 index fa810e819..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/_index.md +++ /dev/null @@ -1,311 +0,0 @@ ---- -title: 2. Migrate Your Services -weight: 100 ---- - -Although your services from v1.6 won't work in Rancher v2.x by default, that doesn't mean you have to start again from square one, manually rebuilding your applications in v2.x. To help with migration from v1.6 to v2.x, Rancher has developed a migration tool. The migration-tools CLI is a utility that helps you recreate your applications in Rancher v2.x. This tool exports your Rancher v1.6 services as Compose files and converts them to a Kubernetes manifest that Rancher v2.x can consume. - -Additionally, for each Rancher v1.6-specific Compose directive that cannot be consumed by Kubernetes, migration-tools CLI provides instructions on how to manually recreate them in Rancher v2.x. - -This command line interface tool will: - -- Export Compose files (i.e., `docker-compose.yml` and `rancher-compose.yml`) for each stack in your v1.6 Cattle environment. For every stack, files are exported to a unique folder: `//`. - -- Parse Compose files that you’ve exported from your Rancher v1.6 stacks and converts them to Kubernetes manifests that Rancher v2.x can consume. The tool also outputs a list of directives present in the Compose files that cannot be converted automatically to Rancher v2.x. These are directives that you’ll have to manually configure using the Rancher v2.x UI. - -## Outline - - - -- [A. Download the migration-tools CLI](#a-download-the-migration-tools-cli) -- [B. Configure the migration-tools CLI](#b-configure-the-migration-tools-cli) -- [C. Run the migration-tools CLI](#c-run-the-migration-tools-cli) -- [D. Deploy Services Using Rancher CLI](#d-re-deploy-services-as-kubernetes-manifests) -- [What Now?](#what-now) - - - - - -## A. Download the migration-tools CLI - -The migration-tools CLI for your platform can be downloaded from our [GitHub releases page](https://github.com/rancher/migration-tools/releases). The tools are available for Linux, Mac, and Windows platforms. 
- - -## B. Configure the migration-tools CLI - -After you download migration-tools CLI, rename it and make it executable. - -1. Open a terminal window and change to the directory that contains the migration-tool file. - -1. Rename the file to `migration-tools` so that it no longer includes the platform name. - -1. Enter the following command to make `migration-tools` executable: - - ``` - chmod +x migration-tools - ``` - -## C. Run the migration-tools CLI - -Next, use the migration-tools CLI to export all stacks in all of the Cattle environments into Compose files. Then, for stacks that you want to migrate to Rancher v2.x, convert the Compose files into Kubernetes manifest. - ->**Prerequisite:** Create an [Account API Key]({{}}/rancher/v1.6/en/api/v2-beta/api-keys/#account-api-keys) to authenticate with Rancher v1.6 when using the migration-tools CLI. - -1. Export the Docker Compose files for your Cattle environments and stacks from Rancher v1.6. - - In the terminal window, execute the following command, replacing each placeholder with your values. - - ``` - migration-tools export --url http:// --access-key --secret-key --export-dir --all - ``` - - **Step Result:** migration-tools exports Compose files (`docker-compose.yml` and `rancher-compose.yml`) for each stack in the `--export-dir` directory. If you omitted this option, Compose files are output to your current directory. - - A unique directory is created for each environment and stack. For example, if we export each [environment/stack]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) from Rancher v1.6, the following directory structure is created: - - ``` - export/ # migration-tools --export-dir - |--/ # Rancher v1.6 ENVIRONMENT - |--/ # Rancher v1.6 STACK - |--docker-compose.yml # STANDARD DOCKER DIRECTIVES FOR ALL STACK SERVICES - |--rancher-compose.yml # RANCHER-SPECIFIC DIRECTIVES FOR ALL STACK SERVICES - |--README.md # README OF CHANGES FROM v1.6 to v2.x - ``` - - - -1. Convert the exported Compose files to Kubernetes manifest. - - Execute the following command, replacing each placeholder with the absolute path to your Stack's Compose files. If you want to migrate multiple stacks, you'll have to re-run the command for each pair of Compose files that you exported. - - ``` - migration-tools parse --docker-file --rancher-file - ``` - - >**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, migration-tools uses the current working directory to find Compose files. - ->**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference]({{}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/). - -### migration-tools CLI Output - -After you run the migration-tools parse command, the following files are output to your target directory. - -| Output | Description | -| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `output.txt` | This file lists how to recreate your Rancher v1.6-specific functionality in Kubernetes. Each listing links to the relevant blog articles on how to implement it in Rancher v2.x. 
| -| Kubernetes manifest specs | Migration-tools internally invokes [Kompose](https://github.com/kubernetes/kompose) to generate a Kubernetes manifest for each service you're migrating to v2.x. Each YAML spec file is named for the service you're migrating. - -#### Why are There Separate Deployment and Service Manifests? - -To make an application publicly accessible by URL, a Kubernetes service is required in support of the deployment. A Kubernetes service is a REST object that abstracts access to the pods in the workload. In other words, a service provides a static endpoint to the pods by mapping a URL to pod(s) Therefore, even if the pods change IP address, the public endpoint remains unchanged. A service object points to its corresponding deployment (workload) by using selector labels. - -When a you export a service from Rancher v1.6 that exposes public ports, migration-tools CLI parses those ports to a Kubernetes service spec that links to a deployment YAML spec. - -#### Migration Example File Output - -If we parse the two example files from [Migration Example Files]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: - -File | Description ------|------------ -`web-deployment.yaml` | A file containing Kubernetes container specs for a Let's Chat deployment. -`web-service.yaml` | A file containing specs for the Let's Chat service. -`database-deployment.yaml` | A file containing container specs for the MongoDB deployment in support of Let's Chat. -`webLB-deployment.yaml` | A file containing container specs for an HAProxy deployment that's serving as a load balancer.1 -`webLB-service.yaml` | A file containing specs for the HAProxy service.1 - ->1 Because Rancher v2.x uses Ingress for load balancing, we won't be migrating our Rancher v1.6 load balancer to v2.x. - - - -## D. Re-Deploy Services as Kubernetes Manifests - ->**Note:** Although these instructions deploy your v1.6 services in Rancher v2.x, they will not work correctly until you adjust their Kubernetes manifests. - -{{% tabs %}} -{{% tab "Rancher UI" %}} - -You can deploy the Kubernetes manifests created by migration-tools by importing them into Rancher v2.x. - ->**Receiving an `ImportYaml Error`?** -> ->Delete the YAML directive listed in the error message. These are YAML directives from your v1.6 services that Kubernetes can't read. - -
Deploy Services: Import Kubernetes Manifest
- -![Deploy Services]({{}}/img/rancher/deploy-service.gif) - -{{% /tab %}} -{{% tab "Rancher CLI" %}} - - ->**Prerequisite:** [Install Rancher CLI]({{}}/rancher/v2.x/en/cli/) for Rancher v2.x. - -Use the following Rancher CLI commands to deploy your application using Rancher v2.x. For each Kubernetes manifest output by migration-tools CLI, enter one of the commands below to import it into Rancher v2.x. - -``` -./rancher kubectl create -f # DEPLOY THE DEPLOYMENT YAML - -./rancher kubectl create -f # DEPLOY THE SERVICE YAML -``` - -{{% /tab %}} -{{% /tabs %}} - -Following importation, you can view your v1.6 services in the v2.x UI as Kubernetes manifests by using the context menu to select ` > ` that contains your services. The imported manifests will display on the **Resources > Workloads** and on the tab at **Resources > Workloads > Service Discovery.** (In Rancher v2.x before v2.3.0, these are on the **Workloads** and **Service Discovery** tabs in the top navigation bar.) - -
Imported Services
- -![Imported Services]({{}}/img/rancher/imported-workloads.png) - -## What Now? - -Although the migration-tool CLI parses your Rancher v1.6 Compose files to Kubernetes manifests, there are discrepancies between v1.6 and v2.x that you must address by manually editing your parsed [Kubernetes manifests](#output). In other words, you need to edit each workload and service imported into Rancher v2.x, as displayed below. - -
Edit Migrated Services
- -![Edit Migrated Workload]({{}}/img/rancher/edit-migration-workload.gif) - -As mentioned in [Migration Tools CLI Output](#migration-tools-cli-output), the `output.txt` files generated during parsing lists the manual steps you must make for each deployment. Review the upcoming topics for more information on manually editing your Kubernetes specs. - -Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, our when our [Migration Example Files]({{}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x. - -
Output.txt Example
- -![output.txt]({{}}/img/rancher/output-dot-text.png) - -The following table lists possible directives that may appear in `output.txt`, what they mean, and links on how to resolve them. - -Directive | Instructions -----------|-------------- -[ports][4] | Rancher v1.6 _Port Mappings_ cannot be migrated to v2.x. Instead, you must manually declare either a HostPort or NodePort, which are similar to Port Mappings. -[health_check][1] | The Rancher v1.6 health check microservice has been replaced with native Kubernetes health checks, called _probes_. Recreate your v1.6 health checks in v2.0 using probes. -[labels][2] | Rancher v1.6 uses labels to implement a variety of features in v1.6. In v2.x, Kubernetes uses different mechanisms to implement these features. Click through on the links here for instructions on how to address each label.

[io.rancher.container.pull_image][7]: In v1.6, this label instructed deployed containers to pull a new version of the image upon restart. In v2.x, this functionality is replaced by the `imagePullPolicy` directive.

[io.rancher.scheduler.global][8]: In v1.6, this label scheduled a container replica on every cluster host. In v2.x, this functionality is replaced by [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/).

[io.rancher.scheduler.affinity][9]: In v2.x, affinity is applied in a different way. -[links][3] | During migration, you must create links between your Kubernetes workloads and services for them to function properly in v2.x. -[scale][5] | In v1.6, scale refers to the number of container replicas running on a single node. In v2.x, this feature is replaced by replica sets. -start_on_create | No Kubernetes equivalent. No action is required from you. - -[1]:{{}}/rancher/v2.x/en/v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x -[2]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[3]:{{}}/rancher/v2.x/en/v1.6-migration/discover-services -[4]:{{}}/rancher/v2.x/en/v1.6-migration/expose-services -[5]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node - - - -[7]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[8]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#scheduling-global-services -[9]:{{}}/rancher/v2.x/en/v1.6-migration/schedule-workloads/#label-affinity-antiaffinity - -### [Next: Expose Your Services]({{}}/rancher/v2.x/en/v1.6-migration/expose-services/) diff --git a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md b/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md deleted file mode 100644 index 56fc0a817..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Migration Tools CLI Reference -weight: 100 ---- - -The migration-tools CLI includes multiple commands and options to assist your migration from Rancher v1.6 to Rancher v2.x. - -## Download - -The migration-tools CLI for your platform can be downloaded from our [GitHub releases page](https://github.com/rancher/migration-tools/releases). The tool is available for Linux, Mac, and Windows platforms. - -## Usage - -``` -migration-tools [global options] command [command options] [arguments...] -``` - -## Migration Tools Global Options - -The migration-tools CLI includes a handful of global options. - -| Global Option | Description | -| ----------------- | -------------------------------------------- | -| `--debug` | Enables debug logging. | -| `--log ` | Outputs logs to the path you enter. | -| `--help`, `-h` | Displays a list of all commands available. | -| `--version`, `-v` | Prints the version of migration-tools CLI in use.| - -## Commands and Command Options - -### Migration-Tools Export Reference - -The `migration-tools export` command exports all stacks from your Rancher v1.6 server into Compose files. - -#### Options - -| Option | Required? | Description| -| --- | --- |--- | -|`--url ` | ✓ | Rancher API endpoint URL (``). | -|`--access-key ` | ✓ | Rancher API access key. Using an account API key exports all stacks from all cattle environments (``). | -|`--secret-key ` | ✓ | Rancher API secret key associated with the access key. (``). | -|`--export-dir ` | | Base directory that Compose files export to under sub-directories created for each environment/stack (default: `Export`). | -|`--all`, `--a` | | Export all stacks. Using this flag exports any stack in a state of inactive, stopped, or removing. | -|`--system`, `--s` | | Export system and infrastructure stacks. | - - -#### Usage - -Execute the following command, replacing each placeholder with your values. 
The access key and secret key are Account API keys, which will allow you to export from all Cattle environments. - -``` -migration-tools export --url --access-key --secret-key --export-dir -``` - -**Result:** The migration-tools CLI exports Compose files for each stack in every Cattle environments in the `--export-dir` directory. If you omitted this option, the files are saved to your current directory. - -### Migration-Tools Parse Reference - -The `migration-tools parse` command parses the Compose files for a stack and uses [Kompose](https://github.com/kubernetes/kompose) to generate an equivalent Kubernetes manifest. It also outputs an `output.txt` file, which lists all the constructs that will need manual intervention in order to be converted to Kubernetes. - -#### Options - -| Option | Required? | Description -| ---|---|--- -|`--docker-file ` | | Parses Docker Compose file to output Kubernetes manifest(default: `docker-compose.yml`) -|`--output-file ` | | Name of file that outputs listing checks and advice for conversion (default: `output.txt`). -|`--rancher-file ` | | Parses Rancher Compose file to output Kubernetes manifest(default: `rancher-compose.yml`) - -#### Subcommands - -| Subcommand | Description | -| ---|---| -| `help`, `h` | Shows a list of options available for use with preceding command. | - -#### Usage - -Execute the following command, replacing each placeholder with the absolute path to your Stack's Compose files. For each stack, you'll have to re-run the command for each pair of Compose files that was exported. - -``` -migration-tools parse --docker-file --rancher-file -``` - ->**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, the migration-tools CLI checks its home directory for these Compose files. - -**Result:** The migration-tools CLI parses your Compose files and outputs Kubernetes manifest specs as well as an `output.txt` file. For each service in the stack, a Kubernetes manifest is created and named the same as your service. The `output.txt` file lists all constructs for each service in `docker-compose.yml` that requires special handling to be successfully migrated to Rancher v2.x. Each construct links to the relevant blog articles on how to implement it in Rancher v2.x. diff --git a/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md b/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md deleted file mode 100644 index 99f4df0d6..000000000 --- a/content/rancher/v2.x/en/v1.6-migration/schedule-workloads/_index.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: "5. Schedule Your Services" -weight: 500 ---- - -In v1.6, objects called _services_ were used to schedule containers to your cluster hosts. Services included the Docker image for an application, along with configuration settings for a desired state. - -In Rancher v2.x, the equivalent object is known as a _workload_. Rancher v2.x retains all scheduling functionality from v1.6, but because of the change from Cattle to Kubernetes as the default container orchestrator, the terminology and mechanisms for scheduling workloads has changed. - -Workload deployment is one of the more important and complex aspects of container orchestration. Deploying pods to available shared cluster resources helps maximize performance under optimum compute resource use. - -You can schedule your migrated v1.6 services while editing a deployment. Schedule services by using **Workload Type** and **Node Scheduling** sections, which are shown below. - -
Editing Workloads: Workload Type and Node Scheduling Sections
- -![Workload Type and Node Scheduling Sections]({{}}/img/rancher/migrate-schedule-workloads.png) - -## In This Document - - - - - -- [What's Different for Scheduling Services?](#whats-different-for-scheduling-services) -- [Node Scheduling Options](#node-scheduling-options) -- [Scheduling Pods to a Specific Node](#scheduling-pods-to-a-specific-node) -- [Scheduling Using Labels](#scheduling-using-labels) -- [Scheduling Pods Using Resource Constraints](#scheduling-pods-using-resource-constraints) -- [Preventing Scheduling Specific Services to Specific Nodes](#preventing-scheduling-specific-services-to-specific-nodes) -- [Scheduling Global Services](#scheduling-global-services) - - - - -## What's Different for Scheduling Services? - - -Rancher v2.x retains _all_ methods available in v1.6 for scheduling your services. However, because the default container orchestration system has changed from Cattle to Kubernetes, the terminology and implementation for each scheduling option have changed. - -In v1.6, you would schedule a service to a host while adding a service to a Stack. In Rancher v2.x, the equivalent action is to schedule a workload for deployment. The following composite image shows a comparison of the UI used for scheduling in Rancher v2.x versus v1.6. - -![Node Scheduling: Rancher v2.x vs v1.6]({{}}/img/rancher/node-scheduling.png) - -## Node Scheduling Options - -Rancher offers a variety of options when scheduling nodes to host workload pods (i.e., scheduling hosts for containers in Rancher v1.6). - -You can choose a scheduling option as you deploy a workload. The term _workload_ is synonymous with adding a service to a Stack in Rancher v1.6. You can deploy a workload by using the context menu to browse to a cluster project (` > > Workloads`). - -The sections that follow provide information on using each scheduling option, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). - -Option | v1.6 Feature | v2.x Feature --------|------|------ -[Schedule a certain number of pods?](#schedule-a-certain-number-of-pods) | ✓ | ✓ -[Schedule pods to a specific node?](#scheduling-pods-to-a-specific-node) | ✓ | ✓ -[Schedule to nodes using labels?](#applying-labels-to-nodes-and-pods) | ✓ | ✓ -[Schedule to nodes using label affinity/anti-affinity rules?](#label-affinity-antiaffinity) | ✓ | ✓ -[Schedule based on resource constraints?](#scheduling-pods-using-resource-constraints) | ✓ | ✓ -[Prevent scheduling specific services to specific hosts?](#preventing-scheduling-specific-services-to-specific-nodes) | ✓ | ✓ -[Schedule services globally?](#scheduling-global-services) | ✓ | ✓ - - -### Schedule a certain number of pods - -In v1.6, you could control the number of container replicas deployed for a service. You can schedule pods the same way in v2.x, but you'll have to set the scale manually while editing a workload. - -![Resolve Scale]({{}}/img/rancher/resolve-scale.png) - -During migration, you can resolve `scale` entries in `output.txt` by setting a value for the **Workload Type** option **Scalable deployment** depicted below. - -
Scalable Deployment Option
- -![Workload Scale]({{}}/img/rancher/workload-type-option.png) - -### Scheduling Pods to a Specific Node - -Just as you could schedule containers to a single host in Rancher v1.6, you can schedule pods to a single node in Rancher v2.x. - -As you deploy a workload, use the **Node Scheduling** section to choose a node to run your pods on. The workload below is being scheduled to deploy an Nginx image with a scale of two pods on a specific node. - - -
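Expressed directly as a Kubernetes manifest, that kind of placement is roughly equivalent to the sketch below. This is an illustrative example only, not output generated by Rancher or the migration tool; the workload name, node name, and image are placeholders.

```
# Hypothetical example: run two nginx pods, both pinned to one node by name.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-pinned          # placeholder workload name
spec:
  replicas: 2                 # "scale" of two pods
  selector:
    matchLabels:
      app: nginx-pinned
  template:
    metadata:
      labels:
        app: nginx-pinned
    spec:
      nodeName: worker-node-1 # placeholder node; both pods schedule here
      containers:
      - name: nginx
        image: nginx
```

The equivalent UI flow is shown in the screenshot below.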
Rancher v2.x: Workload Deployment
- -![Workload Tab and Group by Node Icon]({{}}/img/rancher/schedule-specific-node.png) - -Rancher schedules pods to the node you select if 1) there are compute resources available on the node and 2) when you've configured port mapping to use the HostPort option, there are no port conflicts. - -If you expose the workload using a NodePort that conflicts with another workload, the deployment gets created successfully, but no NodePort service is created. Therefore, the workload isn't exposed outside of the cluster. - -After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. - -![Pods Scheduled to Same Node]({{}}/img/rancher/scheduled-nodes.png) - - - -### Scheduling Using Labels - -In Rancher v2.x, you can constrain pods for scheduling to specific nodes (referred to as hosts in v1.6). Using [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/), which are key/value pairs that you can attach to different Kubernetes objects, you can configure your workload so that pods you've labeled are assigned to specific nodes (or nodes with specific labels are automatically assigned workload pods). - -
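In manifest terms, the simplest form of label-based scheduling is a `nodeSelector`, which restricts a pod to nodes carrying a matching label. The sketch below is illustrative only; the label key and value are placeholders you would define on your own nodes.

```
# Hypothetical example: this pod only schedules to nodes labeled disktype=ssd.
apiVersion: v1
kind: Pod
metadata:
  name: labeled-pod            # placeholder pod name
  labels:
    app: labeled-pod
spec:
  nodeSelector:
    disktype: ssd              # placeholder node label
  containers:
  - name: app
    image: nginx               # placeholder image
```

The table and sections below describe how the equivalent rules are configured in the Rancher UI.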
Label Scheduling Options
- -Label Object | Rancher v1.6 | Rancher v2.x --------------|--------------|--------------- -Schedule by Node? | ✓ | ✓ -Schedule by Pod? | ✓ | ✓ - -#### Applying Labels to Nodes and Pods - -Before you can schedule pods based on labels, you must first apply labels to your pods or nodes. - ->**Hooray!** ->All the labels that you manually applied in Rancher v1.6 (but _not_ the ones automatically created by Rancher) are parsed by migration-tools CLI, meaning you don't have to manually reapply labels. - -To apply labels to pods, make additions to the **Labels and Annotations** section as you configure your workload. After you complete workload configuration, you can view the label by viewing each pod that you've scheduled. To apply labels to nodes, edit your node and make additions to the **Labels** section. - - -#### Label Affinity/AntiAffinity - -Some of the most-used scheduling features in v1.6 were affinity and anti-affinity rules. - -
output.txt Affinity Label
- -![Affinity Label]({{}}/img/rancher/resolve-affinity.png) - -- **Affinity** - - Any pods that share the same label are scheduled to the same node. Affinity can be configured in one of two ways: - - Affinity | Description - ---------|------------ - **Hard** | A hard affinity rule means that the host chosen must satisfy all the scheduling rules. If no such host can be found, the workload will fail to deploy. In the Kubernetes manifest, this rule translates to the `nodeAffinity` directive.

To use hard affinity, configure a rule using the **Require ALL of** section (see figure below). - **Soft** | Rancher v1.6 users are likely familiar with soft affinity rules, which try to schedule the deployment per the rule, but can deploy even if the rule is not satisfied by any host.

To use soft affinity, configure a rule using the **Prefer Any of** section (see figure below). - -
- -
Affinity Rules: Hard and Soft
- - ![Affinity Rules]({{}}/img/rancher/node-scheduling-affinity.png) - -- **AntiAffinity** - - Any pods that share the same label are scheduled to different nodes. In other words, while affinity _attracts_ pods that share a label to each other, anti-affinity _repels_ them from each other, so that pods are scheduled to different nodes. - - You can create anti-affinity rules using either hard or soft affinity. However, when creating your rule, you must use either the `is not set` or `not in list` operator. - - These operators correspond to the Kubernetes `NotIn` and `DoesNotExist` operators, which are the more intuitive terms when you're applying anti-affinity rules. - -
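For reference, hard rules surface in the pod spec as `requiredDuringSchedulingIgnoredDuringExecution` directives and soft rules as `preferredDuringSchedulingIgnoredDuringExecution` directives. The sketch below is illustrative only and is not what the Rancher UI generates verbatim; the label keys and values are placeholders.

```
# Hypothetical example combining a hard rule with a soft, anti-affinity-style rule.
apiVersion: v1
kind: Pod
metadata:
  name: affinity-example       # placeholder pod name
spec:
  affinity:
    nodeAffinity:
      # Hard rule: if no node matches, the pod is not scheduled.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: region        # placeholder label key
            operator: In
            values: ["us-east"]
      # Soft rule: preferred, but the pod still schedules if nothing matches.
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: disktype      # placeholder label key
            operator: NotIn    # anti-affinity-style operator
            values: ["hdd"]
  containers:
  - name: app
    image: nginx               # placeholder image
```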
AntiAffinity Operators
- - ![AntiAffinity]({{}}/img/rancher/node-schedule-antiaffinity.png) - -Detailed documentation for affinity/anti-affinity is available in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). - -Affinity rules that you create in the UI update your workload, adding pod affinity/anti-affinity directives to the workload Kubernetes manifest specs. - - -### Preventing Scheduling Specific Services to Specific Nodes - -In Rancher v1.6 setups, you could prevent services from being scheduled to specific nodes with the use of labels. In Rancher v2.x, you can reproduce this behavior using native Kubernetes scheduling options. - -In Rancher v2.x, you can prevent pods from being scheduled to specific nodes by applying _taints_ to a node. Pods will not be scheduled to a tainted node unless they have special permission, called a _toleration_. A toleration is a setting on the pod that allows it to be deployed to a tainted node. While editing a workload, you can apply tolerations using the **Node Scheduling** section. Click **Show advanced options**. - -
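In the pod spec itself, a toleration looks roughly like the sketch below. This is an illustrative example; the key, value, and effect are placeholders and must match the taint applied to your nodes.

```
# Hypothetical example: tolerate a "dedicated=migration:NoSchedule" taint.
apiVersion: v1
kind: Pod
metadata:
  name: tolerant-pod           # placeholder pod name
spec:
  tolerations:
  - key: "dedicated"           # placeholder taint key
    operator: "Equal"
    value: "migration"         # placeholder taint value
    effect: "NoSchedule"
  containers:
  - name: app
    image: nginx               # placeholder image
```

The screenshot below shows where tolerations are applied in the UI.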
Applying Tolerations
- -![Tolerations]({{}}/img/rancher/node-schedule-advanced-options.png) - -For more information, see the Kubernetes documentation on [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). - -### Scheduling Global Services - -Rancher v1.6 included the ability to deploy [global services]({{}}/rancher/v1.6/en/cattle/scheduling/#global-service), which are services that deploy duplicate containers to each host in the environment (i.e., nodes in your cluster using Rancher v2.x terms). If a service has the `io.rancher.scheduler.global: 'true'` label declared, then Rancher v1.6 schedules a service container on each host in the environment. - -
output.txt Global Service Label
- -![Global Service Label]({{}}/img/rancher/resolve-global.png) - -In Rancher v2.x, you can schedule a pod to each node using a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), which is a specific type of workload. A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. - -To create a DaemonSet while configuring a workload, choose **Run one pod on each node** from the **Workload Type** options. - -
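For reference, the underlying object looks roughly like the sketch below. This is an illustrative example only; the names, label, and image are placeholders, and the optional `nodeSelector` can be omitted to target every node.

```
# Hypothetical example: run one agent pod on every node carrying a label.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-agent             # placeholder workload name
spec:
  selector:
    matchLabels:
      app: node-agent
  template:
    metadata:
      labels:
        app: node-agent
    spec:
      nodeSelector:
        run-agent: "true"      # placeholder label; omit to run on all nodes
      containers:
      - name: agent
        image: nginx           # placeholder image
```

The corresponding option in the workload configuration UI is shown in the screenshot below.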
Workload Configuration: Choose run one pod on each node to configure daemonset
- -![choose Run one pod on each node]({{}}/img/rancher/workload-type.png) - -### Scheduling Pods Using Resource Constraints - -While creating a service in the Rancher v1.6 UI, you could schedule its containers to hosts based on hardware requirements that you chose. Containers were then scheduled to hosts that had the bandwidth, memory, and CPU capacity available. - -In Rancher v2.x, you can still specify the resources required by your pods. However, these options are unavailable in the UI. Instead, you must edit your workload's manifest file to declare these resource constraints. - -To declare resource constraints, edit your migrated workloads and update the **Security & Host** sections (a manifest sketch follows the list below). - -- To reserve a minimum amount of hardware for your pod(s), edit the following sections: - - - Memory Reservation - - CPU Reservation - - NVIDIA GPU Reservation - -- To set a maximum hardware limit for your pods, edit: - - - Memory Limit - - CPU Limit - -
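In the manifest, reservations map to `resources.requests` and limits map to `resources.limits` on each container. The sketch below is illustrative only; the values are placeholders.

```
# Hypothetical example: minimum reservations (requests) and maximum limits.
apiVersion: v1
kind: Pod
metadata:
  name: constrained-pod        # placeholder pod name
spec:
  containers:
  - name: app
    image: nginx               # placeholder image
    resources:
      requests:                # equivalent of Memory/CPU Reservation
        memory: "128Mi"
        cpu: "250m"
      limits:                  # equivalent of Memory/CPU Limit
        memory: "256Mi"
        cpu: "500m"
```

The corresponding UI settings are shown in the screenshot below.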
Scheduling: Resource Constraint Settings
- -![Resource Constraint Settings]({{}}/img/rancher/resource-constraint-settings.png) - -You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). - -### [Next: Service Discovery]({{}}/rancher/v2.x/en/v1.6-migration/discover-services/) diff --git a/pull_request_template.md b/pull_request_template.md index 5c99ddf9f..679d7d28f 100644 --- a/pull_request_template.md +++ b/pull_request_template.md @@ -1,10 +1,3 @@ -When contributing to docs, please don't update the content in the v2.x folder. -It's better to update the versioned docs, for example, the v2.5 or v2.6 docs. +When contributing to docs, please update the versioned docs, for example, the docs in the v2.6 folder. -This content in v2.x was separated into versioned documentation during the v2.5.8 -release. The content relevant to Rancher versions before v2.5 went into the v2.0-v2.4 -folder, while the content related to Rancher v2.5 went into the v2.5 folder. - -We are trying to get the 2.x content to be removed from Google search results. The only -reason we haven't deleted it is because Google search results would lead to 404 -errors if we deleted it. +Doc versions older than the latest minor version should only be updated to fix inaccuracies or make minor updates as necessary. The majority of new content should be added to the folder for the latest minor version. \ No newline at end of file