Compare commits


No commits in common. "main" and "redis-cluster-0.15.8" have entirely different histories.

139 changed files with 31823 additions and 6523 deletions

View File

@@ -8,31 +8,22 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Helm
- name: Set up Helm
uses: azure/setup-helm@v3
with:
version: v3.16.2
version: v3.5.0
- uses: actions/setup-python@v4
with:
python-version: '3.9'
check-latest: true
python-version: 3.7
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@v2.3.1
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config ct.yaml
echo "::set-output name=changed::true"
fi

View File

@@ -25,4 +25,3 @@ jobs:
VALIDATE_YAML: false
DEFAULT_BRANCH: main
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FILTER_REGEX_EXCLUDE: .*(README\.md|NOTES.txt).*

View File

@@ -25,16 +25,8 @@ jobs:
- name: Install Helm
uses: azure/setup-helm@v3
with:
version: v3.16.2
- uses: actions/setup-python@v4
with:
python-version: '3.9'
check-latest: true
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
version: v3.5.4
- name: Add Helm Repository
run: |
helm repo add jetstack https://charts.jetstack.io
@@ -42,22 +34,8 @@ jobs:
- name: Update Helm Repositories
run: helm repo update
- name: Update Chart Dependencies for karpenter
run: helm dependency update charts/karpenter
- name: List Changed Charts
id: list-changed
run: |
changed_charts=$(ct list-changed --config ct.yaml)
echo "Changed charts: $changed_charts"
echo "changed_charts=$changed_charts" >> $GITHUB_ENV
- name: Package and Release Charts
run: |
for CHART in ${{ steps.list-changed.outputs.changed_charts }}; do
echo "Packaging $CHART..."
helm package charts/$CHART
done
- name: Update Chart Dependencies for redis-operator
run: helm dependency update charts/redis-operator
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.5.0

View File

@@ -8,30 +8,51 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
uses: actions/checkout@v2
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1.8.0
with:
cluster_name: kind
- name: Install Kind
uses: helm/kind-action@v1.2.0
- name: Install Helm
uses: azure/setup-helm@v3
uses: azure/setup-helm@v1
with:
version: v3.16.2
version: '3.6.0'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Create Kind Cluster
run: |
kind create cluster
- uses: actions/setup-python@v4
with:
python-version: '3.9'
check-latest: true
- name: Install yq
run: |
sudo snap install yq
- name: Install and test Helm charts
- name: Install cert-manager CRDs and Helm Chart
run: |
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.5.3 \
--set installCRDs=true
- name: Ensure cert-manager is fully deployed
run: |
kubectl rollout status deploy/cert-manager -n cert-manager
kubectl rollout status deploy/cert-manager-cainjector -n cert-manager
kubectl rollout status deploy/cert-manager-webhook -n cert-manager
- name: Install and test Redis Related Helm charts
run: |
kubectl cluster-info --context kind-kind
changed=$(ct list-changed --config ct.yaml)
ct install --config ct.yaml || true
chart_dirs=("redis-operator" "redis" "redis-cluster" "redis-replication" "redis-sentinel")
for dir in "${chart_dirs[@]}"
do
chart_version=$(yq e .version ./charts/$dir/Chart.yaml)
echo "Installing $dir chart with version $chart_version..."
helm install $dir ./charts/$dir/ --version $chart_version
helm test $dir
done
echo "Listing installed Helm charts..."
helm ls

.gitignore (vendored): 4 changed lines
View File

@@ -1,3 +1 @@
*.tgz
Chart.lock
.DS_Store
*.tgz

View File

@@ -14,6 +14,14 @@ helm repo add ot-helm https://ot-container-kit.github.io/helm-charts
You can then run `helm search repo ot-helm` to see the charts.
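For example, a sketch of the two commands together (output will vary with the published chart versions):
```shell
$ helm repo add ot-helm https://ot-container-kit.github.io/helm-charts
$ helm search repo ot-helm
```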
### Helm Charts List
Currently supported helm charts are:
- [Redis Operator](./charts/redis-operator)
- [Redis Standalone](./charts/redis)
- [Redis Cluster](./charts/redis-cluster)
- [K8s Vault Webhook](./charts/k8s-vault-webhook)
### Pre-Requisites

View File

@@ -1,21 +0,0 @@
---
apiVersion: v1
description: A base helm chart which will be used by different helm charts
engine: gotpl
maintainers:
- name: iamabhishek-dubey
email: "abhishek.dubey@opstree.com"
url: https://github.com/iamabhishek-dubey
name: base
sources:
- https://github.com/ot-container-kit/helm-charts
version: 0.1.0
appVersion: "0.1.0"
home: https://github.com/ot-container-kit/helm-charts
keywords:
- deployment
- base
- opstree
- kubernetes
- openshift
icon: https://raw.githubusercontent.com/OT-CONTAINER-KIT/helm-charts/main/static/helm-chart-logo.svg

View File

@@ -1,28 +0,0 @@
# base
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square)
A base helm chart which will be used by different helm charts.
**Homepage:** <https://github.com/ot-container-kit/helm-charts>
## Maintainers
| Name | Email | Url |
|-------------------|----------------------------|----------------------------------------|
| iamabhishek-dubey | abhishek.dubey@opstree.com | <https://github.com/iamabhishek-dubey> |
## Source Code
* <https://github.com/ot-container-kit/helm-charts>
## Values
| Key | Type | Default | Description |
|----------------------------|--------|---------|--------------------------------------------------------------------------------|
| config | object | `{}` | ConfigMap key value pair to create configs |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.name | string | `""` | If not set and create is true, a name is generated using the fullname template |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
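Since the base templates read values under `.Values.base.*`, a parent chart that declares `base` as a dependency sets them beneath a `base:` key. A minimal, hypothetical fragment of such a parent's values.yaml (the annotation and config key names are illustrative):
```yaml
base:
  serviceAccount:
    # Empty name falls back to the generated fullname
    name: ""
    annotations:
      example.com/owner: platform-team   # hypothetical annotation
  config:
    app.conf: |
      log_level = info
```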

View File

@@ -1,13 +0,0 @@
{{- define "configmap" -}}
{{- if .Values.base.config -}}
{{- $top := . -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "base.fullname" . }}
labels:
{{- include "base.labels" . | nindent 4 }}
data:
{{- toYaml .Values.base.config | nindent 2 -}}
{{- end -}}
{{- end -}}
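Because `configmap` is a named template rather than a manifest file, nothing renders until a dependent chart includes it. A minimal, hypothetical wrapper template in the parent chart:
```yaml
{{/* templates/configmap.yaml in a chart that depends on base (hypothetical) */}}
{{ include "configmap" . }}
```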

View File

@@ -1,42 +0,0 @@
{{/*
Create a default fully qualified app name.
We truncate the service name (.Release.Name) at 59 chars because some Kubernetes name fields are limited to 63 (by the DNS naming spec).
We append 4 characters for the chart type at the end, which is -web or -crn or -wrk or -job or -sts.
*/}}
{{- define "base.fullname" -}}
{{- $name := .Release.Name | trunc 59 | trimSuffix "-" }}
{{- printf "%s-%s" $name .Chart.Name }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "base.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "base.labels" -}}
helm.sh/chart: {{ include "base.chart" . }}
{{ include "base.selectorLabels" . }}
{{- if .Release.Revision }}
app.kubernetes.io/version: {{ .Release.Revision | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "base.selectorLabels" -}}
app.kubernetes.io/name: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "base.serviceAccountName" -}}
{{- default (include "base.fullname" .) .Values.base.serviceAccount.name }}
{{- end }}
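As a hand-derived example of these helpers: for a release named `redis-prod`, `base.fullname` renders `redis-prod-base`, and `base.chart` renders `base-0.1.0`.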

View File

@@ -1,12 +0,0 @@
{{- define "serviceAccount" -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "base.serviceAccountName" . }}
labels:
{{- include "base.labels" . | nindent 4 }}
{{- with .Values.base.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,13 +0,0 @@
# Default values for base template.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
serviceAccount:
# -- Annotations to add to the service account
annotations: {}
# -- The name of the service account to use.
# -- If not set and create is true, a name is generated using the fullname template
name: ""
# -- ConfigMap key value pair to create configs
config: {}

View File

@@ -1,16 +0,0 @@
apiVersion: v2
name: ingress-management
description: A Helm chart to manage Ingress traffic
version: 0.1.0
appVersion: "1.0"
home: https://github.com/ot-container-kit/helm-charts
maintainers:
- name: sharvarikhamkar1304
keywords:
- ingress
- kong
- httpRoute
- kubernetes
icon: https://raw.githubusercontent.com/OT-CONTAINER-KIT/helm-charts/main/static/helm-chart-logo.svg
sources:
- https://github.com/ot-container-kit/helm-charts

View File

@@ -1,49 +0,0 @@
# Ingress Management Helm Chart
A simple and reusable Helm chart to manage Kubernetes Gateway API HTTPRoutes for routing traffic to backend services.
This chart helps manage HTTPRoute resources to expose services using the Kubernetes Gateway API. You can customize host, path, service, and namespace via values.
## Homepage
[https://github.com/ot-container-kit/helm-charts](https://github.com/ot-container-kit/helm-charts)
## Maintainers
| Name | URL |
| ---------------- | --------------------------------------------- |
| sharvari-khamkar | [GitHub](https://github.com/sharvari-khamkar) |
## Source Code
[GitHub - ot-container-kit/helm-charts](https://github.com/ot-container-kit/helm-charts)
## Requirements
| Repository | Name | Version |
| ------------------------------------------------------------------------------------------------ | ---- | ------- |
| [https://ot-container-kit.github.io/helm-charts](https://ot-container-kit.github.io/helm-charts) | base | 0.1.0 |
## Values
| **Attribute** | **Scope** | **Example** | **Description** | **Default** |
|---------------|-----------|-------------|-----------------|-------------|
| `name` | Global | `"my-app"` | Name of the HTTPRoute and backend service (the app name) | `""` |
| `namespace` | Global | `"default"` | Kubernetes namespace where resources like HTTPRoute will be deployed | `""` |
| `host` | Routing | `"app.example.com"` | Hostname to expose the app | `""` |
| `path` | Routing | `"/api"` | Path under the host | `""` |
| `service.name` | Service Config | `"my-backend-svc"` | Name of the backend service to which traffic will be routed | `""` |
| `service.kind` | Service Config | `"Service"` | Kind of backend resource (Service by default) | `"Service"` |
| `service.port` | Service Config | `80` | Port on which the backend service listens | `80` |

View File

@@ -1,46 +0,0 @@
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: {{ required "A valid 'name' is required!" .Values.name }}
{{- if .Values.labels }}
labels:
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
{{- if .Values.annotations }}
annotations:
{{ toYaml .Values.annotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.parentRefs }}
parentRefs:
{{- range .Values.parentRefs }}
- name: {{ .name }}
{{- if .namespace }}
namespace: {{ .namespace }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.hostnames }}
hostnames:
{{- range .Values.hostnames }}
- "{{ . }}"
{{- end }}
{{- end }}
rules:
{{- range .Values.rules }}
- matches:
{{- range .matches }}
- path:
type: {{ .path.type }}
value: {{ .path.value | quote }}
{{- end }}
backendRefs:
{{- range .backendRefs }}
- name: {{ .name }}
kind: {{ .kind | default "Service" }}
port: {{ .port }}
{{- end }}
{{- end }}

View File

@@ -1,60 +0,0 @@
---
# charts/ingress-management/values.yaml
# -- Name of the HTTPRoute and backend service (typically the app name)
name: ""
# -- Labels to apply to the HTTPRoute metadata
labels:
app: ""
# -- Optional annotations to apply to the HTTPRoute resource
annotations: {}
# -- Reference to the Gateway (parentRefs)
parentRefs:
- name: ""
namespace: ""
# -- Hostnames to be matched in the HTTPRoute
hostnames:
- ""
# -- Routing rules for HTTPRoute
rules:
- matches:
- path:
type: PathPrefix
value: ""
backendRefs:
- name: ""
kind: Service
port: 80
# -----------------------------------------------------
# Example values.yaml File
# -----------------------------------------------------
# name: open-webui
# labels:
# app: open-webui
# annotations:
# konghq.com/protocols: https
# konghq.com/https-redirect-status-code: "301"
# parentRefs:
# - name: kong
# namespace: default
# hostnames:
# - bp-ai.opstree.dev
# rules:
# - matches:
# - path:
# type: PathPrefix
# value: /
# backendRefs:
# - name: open-webui
# kind: Service
# port: 80
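For reference, a hand-derived sketch of what the HTTPRoute template above would render from this example block (an illustration, not generated output):
```yaml
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: open-webui
  labels:
    app: open-webui
  annotations:
    konghq.com/protocols: https
    konghq.com/https-redirect-status-code: "301"
spec:
  parentRefs:
    - name: kong
      namespace: default
  hostnames:
    - "bp-ai.opstree.dev"
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: "/"
      backendRefs:
        - name: open-webui
          kind: Service
          port: 80
```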

View File

@@ -1,9 +0,0 @@
apiVersion: v2
name: ot-karpenter
version: 0.3.0
maintainers:
- name: opstree
dependencies:
- name: karpenter
version: 1.1.1
repository: oci://public.ecr.aws/karpenter

View File

@@ -1,78 +0,0 @@
# Karpenter
Karpenter is an open-source Kubernetes cluster autoscaler built for efficiency and speed. This Helm chart installs Karpenter in your Kubernetes cluster and can be used to manage your node pools for dynamically scaling your infrastructure. This chart supports automated deployment of Karpenter, including the creation of NodePools, EC2NodeClasses, IAM roles, and other necessary resources.
To install Karpenter, use the following commands:
```shell
$ helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/
$ helm install karpenter ot-helm/karpenter --namespace <namespace> --dependency-update --create-namespace
```
The first command adds the ot-helm repository, which contains the Karpenter Helm chart; the second installs the chart from that repository.
To upgrade the setup:
```shell
$ helm upgrade karpenter ot-helm/karpenter --install --namespace <namespace> --create-namespace
```
Upgrades an existing Karpenter release or installs it if it doesn't exist.
To uninstall the chart:
```shell
$ helm delete karpenter --namespace <namespace>
```
Deletes the Karpenter release from the specified namespace.
Replace `<namespace>` with the namespace where Karpenter is installed.
### Pre-Requisites
- Kubernetes >= 1.18
- Helm >= 3.x
- Karpenter Operator >= 0.1.0
- OpenID Connect provider (EKS): https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html
- IAM Roles for Karpenter
- Add tags to subnets and security groups
- Update aws-auth ConfigMap
### Parameters
| **Name** | **Value** | **Description** |
|--------------------------------------------------------------------|:-------------------------------|------------------------------------------------|
| `karpenter.settings.clusterName` | `my-cluster` | The name of your Kubernetes cluster |
| `karpenter.serviceAccount.annotations.eks.amazonaws.com/role-arn` | Required | IAM role ARN for Karpenter controller |
| `karpenter.controller.resources.requests.cpu` | `1` | CPU request for Karpenter controller |
| `karpenter.controller.resources.requests.memory` | `1Gi` | Memory request for Karpenter controller |
| `karpenter.controller.resources.limits.cpu` | `1` | CPU limit for Karpenter controller |
| `karpenter.controller.resources.limits.memory` | `1Gi` | Memory limit for Karpenter controller |
| `nodePools` | `[]` | List of NodePools to be created |
| `nodePools.name` | `default-nodepool` | Name of the NodePool |
| `nodePools.labels` | `{}` | Labels for the NodePool (optional) |
| `nodePools.annotations` | `{}` | Annotations for the NodePool (optional) |
| `nodePools.requirements` | `[]` | Node requirements like CPU, memory, etc. (may be empty) |
| `nodePools.taints` | `[]` | Taints for the NodePool (optional) |
| `nodePools.expireAfter` | `720h` | Expiration duration for idle NodePools |
| `nodePools.limits.cpu` | `"1000m"` | CPU limit for the NodePool (required) |
| `nodePools.limits.memory` | `"2Gi"` | Memory limit for the NodePool (optional) |
| `nodePools.disruption.consolidationPolicy` | `WhenEmptyOrUnderutilized` | Consolidation policy for underutilized nodes (required) |
| `nodePools.disruption.consolidateAfter` | `1m` | Time before consolidating underutilized nodes (required) |
### Notes:
- Refer to the example folder for an example values.yaml file
- Karpenter automatically creates and manages NodePools as part of the installation process.
- Make sure to configure the IAM roles Karpenter requires to interact with EC2 instances and manage resources, along with all other prerequisites.
- The chart will ensure the Karpenter controller and NodePools are deployed correctly with all required configurations.
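To supply the parameters from the table above, a values file can be passed at install time; a sketch (the file name is illustrative):
```shell
$ helm install karpenter ot-helm/karpenter --namespace <namespace> \
    --create-namespace --dependency-update -f my-values.yaml
```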

View File

@@ -1,82 +0,0 @@
# This example below has 2 nodepools for reference
# Custom values for your chart
clusterName: "" # Name of the EKS cluster (for identification in the chart and Karpenter)
awsPartition: "" # AWS partition, default is 'aws' (used in multi-region or partitioned environments)
awsAccountId: 3333 # AWS account ID where the resources will be provisioned
# Karpenter chart overrides
karpenter:
settings:
clusterName: "" # Cluster name for the Karpenter controller to identify and manage nodes in this cluster
serviceAccount:
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::3333:role/KarpenterControllerRole-demo-eks # IAM role for Karpenter controller's access to AWS services
controller:
resources:
requests:
cpu: "1" # CPU resource request for the Karpenter controller (minimum resources Karpenter will be allocated)
memory: "1Gi" # Memory resource request for the Karpenter controller
limits:
cpu: "1" # CPU resource limit for the Karpenter controller (maximum resources Karpenter can consume)
memory: "1Gi" # Memory resource limit for the Karpenter controller
# NodePools define groups of nodes with specific requirements
nodePools:
- name: default # Name of the node pool, used for identification
limits: # Required Field
cpu: "1000"
memory: "1000Gi"
disruption: # Required Field
consolidationPolicy: WhenEmptyOrUnderutilized
consolidateAfter: 1m
requirements: # Node pool requirements for instance types and other properties
- key: kubernetes.io/arch
operator: In # Specifies the architecture for nodes
values:
- "amd64"
- key: kubernetes.io/os
operator: In # Specifies the OS type for nodes
values:
- "linux" # The node pool requires Linux OS
- key: karpenter.sh/capacity-type
operator: In # Specifies the capacity type for nodes
values:
- "on-demand"
- key: karpenter.k8s.aws/instance-category
operator: In # Specifies allowed EC2 instance categories
values:
- "t" # Instance category t (e.g., T2, T3)
- "m"
- "r"
minValues: 2 # Minimum number of instances of each category
- key: karpenter.k8s.aws/instance-family
operator: Exists # Specifies that instances in the family must exist (e.g., m5, r5)
minValues: 5 # Minimum number of instances in the specified family
- key: karpenter.k8s.aws/instance-family
operator: In # Specifies that the instance family must match one of the listed values
values:
- "m5"
- "m5d"
- "c5"
- "c5d"
- "c4"
- "r4"
minValues: 3 # Minimum number of instances from these families
- key: node.kubernetes.io/instance-type
operator: Exists # Ensures that the node pool has specific instance types
minValues: 10 # Minimum number of instances of the specified types
- key: karpenter.k8s.aws/instance-generation
operator: Gt # Specifies that the instance generation must be greater than a particular value
values:
- "2" # Instance generation must be greater than 2 (i.e., newer generation)
nodeClass:
group: karpenter.k8s.aws # Node class group for Karpenter
kind: EC2NodeClass # Kind of node class, EC2NodeClass indicates AWS EC2 instances
name: default # The name of the node class (default for this pool)

View File

@@ -1,33 +0,0 @@
{{- range .Values.ec2NodeClasses }}
apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
name: {{ .name }}
spec:
amiFamily: {{ .amiFamily | default "AL2" }}
role: {{ .role }}
{{- if .detailedMonitoring }}
detailedMonitoring: {{ .detailedMonitoring }}
{{- end }}
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: "{{ $.Values.clusterName }}"
securityGroupSelectorTerms:
- tags:
karpenter.sh/discovery: "{{ $.Values.clusterName }}"
amiSelectorTerms:
- id: "{{ .amiSelector.arm }}"
- id: "{{ .amiSelector.amd }}"
{{- if .amiSelector.gpu }}
- id: "{{ .amiSelector.gpu }}"
{{- end }}
{{- if .amiSelector.name }}
- name: "{{ .amiSelector.name }}"
{{- end }}
{{- if .tags }}
tags:
{{- range $key, $value := .tags }}
{{ $key }}: "{{ $value }}"
{{- end }}
{{- end }}
{{- end }}
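A minimal, hypothetical values fragment this template would consume (the role name and AMI IDs are placeholders; note that the `arm` and `amd` selectors are rendered unconditionally, so both must be set):
```yaml
ec2NodeClasses:
  - name: default
    amiFamily: AL2
    role: KarpenterNodeRole-demo-eks   # node role name, not the ARN
    amiSelector:
      arm: ami-0123456789abcdef0       # placeholder AMI IDs
      amd: ami-0fedcba9876543210
    tags:
      team: engineering
```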

View File

@@ -1,73 +0,0 @@
{{- range .Values.nodePools }}
---
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
name: {{ .name }}
spec:
template:
metadata:
labels:
{{- if .labels }}
{{- range $key, $value := .labels }}
{{ $key }}: {{ $value }}
{{- end }}
{{- else }}
{} # Empty labels object if no labels are defined
{{- end }}
annotations:
{{- if .annotations }}
{{- range $key, $value := .annotations }}
{{ $key }}: {{ $value }}
{{- end }}
{{- else }}
{} # Empty annotations object if no annotations are defined
{{- end }}
spec:
requirements:
{{- if .requirements }}
{{- if gt (len .requirements) 0 }}
{{- range .requirements }}
- key: {{ .key }}
operator: {{ .operator }}
values:
{{ toYaml .values | indent 12 }}
{{- if .minValues }}
minValues: {{ .minValues }}
{{- end }}
{{- end }}
{{- else }}
[] # Render an empty array explicitly when no requirements are defined
{{- end }}
{{- else }}
[] # Ensure that an empty array is rendered even if the user does not specify requirements
{{- end }}
taints:
{{- if .taints }}
{{- range .taints }}
- key: {{ .key }}
{{- if .value }}
value: {{ .value }}
{{- end }}
effect: {{ .effect }}
{{- end }}
{{- else }}
[] # Empty taints array if no taints are defined
{{- end }}
nodeClassRef:
group: {{ .nodeClass.group | default "karpenter.k8s.aws" }}
kind: {{ .nodeClass.kind | default "EC2NodeClass" }}
name: {{ .nodeClass.name }}
expireAfter: {{ .expireAfter | default "720h" }}
limits:
{{- if .limits.cpu }}
cpu: {{ .limits.cpu }}
{{- end }}
{{- if .limits.memory }}
memory: {{ .limits.memory }}
{{- end }}
disruption:
consolidationPolicy: {{ .disruption.consolidationPolicy | default "WhenEmptyOrUnderutilized" }}
consolidateAfter: {{ .disruption.consolidateAfter | default "1m" }}
{{- end }}

View File

@@ -1,110 +0,0 @@
# Custom values for your chart
# Name of the EKS cluster (for identification in the chart and Karpenter)
clusterName: ""
# AWS partition, default is 'aws' (used in multi-region or partitioned environments)
awsPartition: ""
# AWS account ID where the resources will be provisioned
awsAccountId: 3333
# Karpenter chart overrides
karpenter:
settings:
# Cluster name for the Karpenter controller to identify and manage nodes in this cluster
clusterName: ""
# Name of SQS queue for handling EC2 instance interruptions
# interruptionQueue: ""
serviceAccount:
annotations:
# IAM role ARN for Karpenter controller's access to AWS services
eks.amazonaws.com/role-arn: arn:aws:iam::3333:role/KarpenterControllerRole-demo-eks
# Karpenter controller resources can be customized in this section below
# controller:
# resources:
# requests:
# cpu: "1" # CPU resource request for the Karpenter controller (minimum resources Karpenter will be allocated)
# memory: "1Gi" # Memory resource request for the Karpenter controller
# limits:
# cpu: "1" # CPU resource limit for the Karpenter controller (maximum resources Karpenter can consume)
# memory: "1Gi" # Memory resource limit for the Karpenter controller
# EC2NodeClasses define the EC2 instance classes that Karpenter can use
ec2NodeClasses:
- name: default
# Amazon Linux 2 AMI family
amiFamily: AL2
# "KarpenterNodeRole-my-eks-cluster" # Name of karpenter Node Role ( NOT THE ARN )
role:
amiSelector:
# To get the AMI ID, run the commands below in the AWS CLI and replace the AMI ID in the values.yaml file
# ARM_AMI_ID="$(aws ssm get-parameter --name /aws/service/eks/optimized-ami/${K8S_VERSION}/amazon-linux-2-arm64/recommended/image_id --query Parameter.Value --output text)"
arm:
# AMD_AMI_ID="$(aws ssm get-parameter --name /aws/service/eks/optimized-ami/${K8S_VERSION}/amazon-linux-2/recommended/image_id --query Parameter.Value --output text)"
amd:
# GPU_AMI_ID="$(aws ssm get-parameter --name /aws/service/eks/optimized-ami/${K8S_VERSION}/amazon-linux-2-gpu/recommended/image_id --query Parameter.Value --output text)"
# gpu: ami-gpu-id
# amazon-eks-node-1.27-* # Optional: EKS Node AMI Name
# name:
# Optional, propagates tags to underlying EC2 resources
# tags:
# environment: production
# team: "engineering"
# owner: "admin@company.com"
# Enable detailed monitoring for the EC2 instance
# detailedMonitoring: true
# NodePools define groups of nodes with specific requirements
nodePools:
- name: default # Name of the node pool, preset here is set to default nodepool
requirements: # List of node requirements for scheduling
- key: kubernetes.io/arch # Architecture requirement (e.g., amd64, arm64)
operator: In # Only nodes with the specified architecture will be selected
values:
- "amd64" # Specifies that the node should have an amd64 architecture
- key: kubernetes.io/os # OS requirement (e.g., linux, windows)
operator: In # Only nodes with the specified OS will be selected
values:
- "linux" # Specifies that the node should run Linux
- key: karpenter.sh/capacity-type # Defines the instance's capacity type
operator: In # Only nodes with the specified capacity type will be selected
values:
- "on-demand" # Specifies that the node should be an on-demand instance, can be "spot" as well
- key: karpenter.k8s.aws/instance-category # Defines the instance category (e.g., t, m, r)
operator: In # Only nodes with the specified instance category will be selected
values:
- "t" # These can be customized as per need
- "m"
- "r"
# - key: karpenter.k8s.aws/instance-family # Uncomment to define the instance family (e.g., t3, m5, r5)
# operator: In
# values:
# - "t3a"
- key: karpenter.k8s.aws/instance-generation # Instance generation requirement
operator: Gt # Greater than the specified value
values:
- "2" # Specifies that only instance generations greater than 2 are allowed
nodeClass: # Defines the node class, which is linked to EC2NodeClass
group: karpenter.k8s.aws # Group of the EC2NodeClass
kind: EC2NodeClass # Type of node class, which is EC2NodeClass in this case
name: default # Name of the EC2NodeClass to use for the node pool (name of the EC2 instance class)
expireAfter: 720h # Maximum lifetime of the node pool before it expires (720 hours = 30 days)
limits: # Resource limits for the node pool
cpu: "1000" # Maximum CPU limit for the node pool
memory: "1Gi"
disruption: # Policy for handling disruption in the node pool
consolidationPolicy: WhenEmptyOrUnderutilized # Consolidate nodes when they are empty or underutilized
consolidateAfter: 1m # Time after which consolidation will occur, in this case, 1 minute
# Uncomment Below annotations key ( next 3 Lines ) if you want to use annotations
# annotations: # Annotations are key-value pairs that provide additional metadata for the node pool
# example.com/owner: "my-team" # An example annotation that associates the node pool with a team
# example.com/maintainer: "admin@company.com" # Example annotation for the maintainer's contact information
# Uncomment below taint key ( next 4 Lines ) if you want to use taints
# taints: # Taints are used to control which pods can be scheduled on the node pool
# - key: "example.com/special-taint" # Taint key that identifies the taint
# value: "special-value" # Value associated with the taint
# effect: "NoExecute" # Effect of the taint. In this case, NoExecute means pods won't be scheduled on tainted nodes
# Comment Labels Key below if you dont want to use Labels
labels: # Labels are key-value pairs used for categorizing the node pool
environment: production # Label indicating that this node pool is for production use
team: "engineering" # Label associating the node pool with the engineering team

View File

@@ -1,27 +0,0 @@
apiVersion: v2
name: loki
description: A Helm chart for loki
type: application
version: 1.0.1
appVersion: 1.0.0
dependencies:
- name: loki-distributed
version: 0.76.1
repository: https://grafana.github.io/helm-charts
alias: distributed
tags:
- logging
condition: distributed.enabled
- name: promtail
version: 6.16.4
repository: https://grafana.github.io/helm-charts
alias: promtail
tags:
- logging
- name: loki
version: 6.7.3
repository: https://grafana.github.io/helm-charts
alias: standalone
tags:
- logging
condition: standalone.enabled
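Each dependency carries an `alias`, and the two Loki variants carry a `condition`, so the deployment mode is selected from values; a minimal sketch (mirroring the standalone values file further below):
```yaml
distributed:
  enabled: false
standalone:
  enabled: true
```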

View File

@@ -1,501 +0,0 @@
logging:
gateway:
# image:
# registry:
# repository:
# tag: 1.20.2-alpine
enabled: true
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 2
resources:
requests:
memory: 500Mi
cpu: 200m
limits:
memory: 500Mi
cpu: 200m
nginxConfig:
file: |
worker_processes 5; ## Default: 1
error_log /dev/stderr;
pid /tmp/nginx.pid;
worker_rlimit_nofile 8192;
events {
worker_connections 4096; ## Default: 1024
}
http {
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
client_max_body_size 5M;
proxy_http_version 1.1;
default_type application/octet-stream;
log_format {{ .Values.gateway.nginxConfig.logFormat }}
{{- if .Values.gateway.verboseLogging }}
access_log /dev/stderr main;
{{- else }}
map $status $loggable {
~^[23] 0;
default 1;
}
access_log /dev/stderr main if=$loggable;
{{- end }}
sendfile on;
tcp_nopush on;
{{- if .Values.gateway.nginxConfig.resolver }}
resolver {{ .Values.gateway.nginxConfig.resolver }};
{{- else }}
resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }};
{{- end }}
{{- with .Values.gateway.nginxConfig.httpSnippet }}
{{ . | nindent 2 }}
{{- end }}
server {
listen 8080;
{{- if .Values.gateway.basicAuth.enabled }}
auth_basic "Loki";
auth_basic_user_file /etc/nginx/secrets/.htpasswd;
{{- end }}
location = / {
return 200 'OK';
auth_basic off;
access_log off;
}
location = /api/prom/push {
set $api_prom_push_backend http://{{ include "loki.distributorFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $api_prom_push_backend:3100$request_uri;
proxy_http_version 1.1;
}
location = /api/prom/tail {
set $api_prom_tail_backend http://{{ include "loki.querierFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $api_prom_tail_backend:3100$request_uri;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
# Ruler
location ~ /prometheus/api/v1/alerts.* {
proxy_pass http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
}
location ~ /prometheus/api/v1/rules.* {
proxy_pass http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
}
location ~ /api/prom/rules.* {
proxy_pass http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
}
location ~ /api/prom/alerts.* {
proxy_pass http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
}
location ~ /api/prom/.* {
set $api_prom_backend http://{{ include "loki.queryFrontendFullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $api_prom_backend:3100$request_uri;
proxy_http_version 1.1;
}
location = /loki/api/v1/push {
set $loki_api_v1_push_backend http://{{ include "loki.distributorFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $loki_api_v1_push_backend:3100$request_uri;
proxy_http_version 1.1;
}
location = /loki/api/v1/tail {
set $loki_api_v1_tail_backend http://{{ include "loki.querierFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $loki_api_v1_tail_backend:3100$request_uri;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
location ~ /loki/api/.* {
set $loki_api_backend http://{{ include "loki.queryFrontendFullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
proxy_pass $loki_api_backend:3100$request_uri;
proxy_http_version 1.1;
}
{{- with .Values.gateway.nginxConfig.serverSnippet }}
{{ . | nindent 4 }}
{{- end }}
}
}
loki:
# image:
# registry:
# repository: grafana/loki
# tag: 2.9.2
podAnnotations:
sidecar.istio.io/inject: "false"
storageConfig:
aws:
s3: http://minio:minio123@monitoring-minio.monitoring.svc:9000/loki
s3forcepathstyle: true
region: us-east-1
# aws:
# region: ap-south-1
# bucketnames: jm-prod-loki-app-logs
# s3forcepathstyle: false
# sse_encryption: true
boltdb_shipper:
shared_store: s3
cache_ttl: 24h
schemaConfig:
configs:
- from: "2020-09-07"
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: loki_index_
period: 24h
config: |
auth_enabled: false
server:
{{- toYaml .Values.loki.server | nindent 6 }}
common:
compactor_address: http://{{ include "loki.compactorFullname" . }}:3100
distributor:
ring:
kvstore:
store: memberlist
memberlist:
join_members:
- {{ include "loki.fullname" . }}-memberlist
ingester_client:
grpc_client_config:
grpc_compression: gzip
ingester:
lifecycler:
ring:
kvstore:
store: memberlist
replication_factor: 1
chunk_idle_period: 30m
chunk_block_size: 262144
chunk_encoding: snappy
chunk_retain_period: 1m
max_transfer_retries: 0
wal:
dir: /var/loki/wal
limits_config:
retention_period: 72h
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
max_cache_freshness_per_query: 10m
split_queries_by_interval: 15m
# for big logs tune
per_stream_rate_limit: 512M
per_stream_rate_limit_burst: 1024M
cardinality_limit: 200000
ingestion_burst_size_mb: 1000
ingestion_rate_mb: 10000
max_entries_limit_per_query: 1000000
max_label_value_length: 20480
max_label_name_length: 10240
max_label_names_per_series: 300
{{- if .Values.loki.schemaConfig}}
schema_config:
{{- toYaml .Values.loki.schemaConfig | nindent 2}}
{{- end}}
{{- if .Values.loki.storageConfig}}
storage_config:
{{- if .Values.indexGateway.enabled}}
{{- $indexGatewayClient := dict "server_address" (printf "dns:///%s:9095" (include "loki.indexGatewayFullname" .)) }}
{{- $_ := set .Values.loki.storageConfig.boltdb_shipper "index_gateway_client" $indexGatewayClient }}
{{- end}}
{{- toYaml .Values.loki.storageConfig | nindent 2}}
{{- if .Values.memcachedIndexQueries.enabled }}
index_queries_cache_config:
memcached_client:
addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedIndexQueriesFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
consistent_hash: true
{{- end}}
{{- end}}
runtime_config:
file: /var/{{ include "loki.name" . }}-runtime/runtime.yaml
chunk_store_config:
max_look_back_period: 0s
{{- if .Values.memcachedChunks.enabled }}
chunk_cache_config:
embedded_cache:
enabled: false
memcached_client:
consistent_hash: true
addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedChunksFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
{{- end }}
{{- if .Values.memcachedIndexWrites.enabled }}
write_dedupe_cache_config:
memcached_client:
consistent_hash: true
addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedIndexWritesFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
{{- end }}
table_manager:
retention_deletes_enabled: false
retention_period: 0s
query_range:
align_queries_with_step: true
max_retries: 5
cache_results: true
results_cache:
cache:
{{- if .Values.memcachedFrontend.enabled }}
memcached_client:
addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedFrontendFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
consistent_hash: true
{{- else }}
embedded_cache:
enabled: true
ttl: 24h
{{- end }}
frontend_worker:
{{- if .Values.queryScheduler.enabled }}
scheduler_address: {{ include "loki.querySchedulerFullname" . }}:9095
{{- else }}
frontend_address: {{ include "loki.queryFrontendFullname" . }}-headless:9095
{{- end }}
frontend:
log_queries_longer_than: 5s
compress_responses: true
{{- if .Values.queryScheduler.enabled }}
scheduler_address: {{ include "loki.querySchedulerFullname" . }}:9095
{{- end }}
tail_proxy_url: http://{{ include "loki.querierFullname" . }}:3100
compactor:
working_directory: /tmp/loki/compactor
shared_store: s3
compaction_interval: 2m
retention_enabled: false
ruler:
storage:
type: local
local:
directory: /etc/loki/rules
ring:
kvstore:
store: memberlist
rule_path: /tmp/loki/scratch
alertmanager_url: https://alertmanager.xx
external_url: https://alertmanager.xx
serviceAccount:
create: true
name: loki-sa
imagePullSecrets: []
labels: {}
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::913108190184:role/jm-prod-fluent
automountServiceAccountToken: true
compactor:
enabled: true
retention_enabled: true
shared_store: s3
# nodeSelector:
# appType: monitoring
# tolerations:
# - key: "appType"
# operator: "Equal"
# value: "monitoring"
# effect: "NoSchedule"
queryFrontend:
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 2
resources:
requests:
memory: 500Mi
cpu: 200m
limits:
memory: 500Mi
cpu: 200m
distributor:
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 2
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
ingester:
replicas: 2
maxUnavailable: 1
persistence:
enabled: true
claims:
- name: data
size: 1Gi
# storageClass: encrypted-gp3
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
# nodeSelector:
# appType: monitoring
# tolerations:
# - key: "appType"
# operator: "Equal"
# value: "monitoring"
# effect: "NoSchedule"
# affinity: ""
querier:
kind: Deployment
replicas: 1
maxUnavailable: 1
# persistence:
# enabled: true
# size: 10Gi
# storageClass: encrypted-gp3
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 2
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
memcachedChunks:
enabled: true
replicas: 1
maxUnavailable: 1
persistence:
enabled: true
size: 1Gi
# storageClass: encrypted-gp3
extraArgs:
- -m 2048
- -I 32m
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
memcachedFrontend:
enabled: true
replicas: 1
maxUnavailable: 1
persistence:
enabled: true
size: 1Gi
# storageClass: encrypted-gp3
extraArgs:
- -m 2048
- -I 32m
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
memcachedIndexQueries:
enabled: true
replicas: 1
maxUnavailable: 1
persistence:
enabled: true
size: 1Gi
# storageClass: encrypted-gp3
extraArgs:
- -m 2048
- -I 32m
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
indexGateway:
enabled: true
replicas: 2
maxUnavailable: 1
persistence:
enabled: true
size: 1Gi
# storageClass: encrypted-gp3
resources:
requests:
cpu: 200m
memory: 500Mi
limits:
cpu: 200m
memory: 500Mi
# serviceMonitor:
# enabled: true
# namespace: logging
# namespaceSelector:
# any: true
# labels:
# prometheus: kube
# prometheusRule:
# enabled: false
# namespace: logging
# annotations: {}
# labels:
# app: loki-kube-prometheus
# prometheus: kube
# groups: []
promtail:
config:
logLevel: info
clients:
- url: http://loki-logging-gateway.logging.svc.cluster.local/loki/api/v1/push

View File

@@ -1,86 +0,0 @@
logging:
loki:
storage:
type: filesystem
auth_enabled: false
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: 2024-04-01
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: loki_index_
period: 24h
ingester:
chunk_encoding: snappy
tracing:
enabled: true
querier:
# Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
max_concurrent: 2
deploymentMode: SingleBinary
lokiCanary:
enabled: false
test:
enabled: false
singleBinary:
replicas: 1
resources:
limits:
cpu: 3
memory: 4Gi
requests:
cpu: 2
memory: 2Gi
extraEnv:
# Keep a little bit lower than memory limits
- name: GOMEMLIMIT
value: 3750MiB
chunksCache:
# default is 500MB, with limited memory keep this smaller
writebackSizeLimit: 10MB
allocatedMemory: 1024
# Enable minio for storage
minio:
enabled: false
persistence:
size: 10Gi
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
promtail:
config:
logLevel: info
clients:
- url: http://logging-gateway/loki/api/v1/push

View File

@@ -1,90 +0,0 @@
distributed:
enabled: false
standalone:
enabled: true
loki:
storage:
type: filesystem
auth_enabled: false
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: 2024-04-01
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: loki_index_
period: 24h
ingester:
chunk_encoding: snappy
tracing:
enabled: true
querier:
# Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
max_concurrent: 2
deploymentMode: SingleBinary
lokiCanary:
enabled: false
test:
enabled: false
singleBinary:
replicas: 1
resources:
limits:
cpu: 3
memory: 4Gi
requests:
cpu: 2
memory: 2Gi
extraEnv:
# Keep a little bit lower than memory limits
- name: GOMEMLIMIT
value: 3750MiB
chunksCache:
# default is 500MB, with limited memory keep this smaller
writebackSizeLimit: 10MB
allocatedMemory: 1024
# Enable minio for storage
minio:
enabled: false
persistence:
size: 10Gi
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
promtail:
config:
logLevel: info
clients:
- url: http://logging-gateway/loki/api/v1/push

View File

@@ -1,10 +0,0 @@
apiVersion: v2
name: microservice
description: Basic helm chart for deploying microservices on kubernetes with best practices
type: application
version: 0.1.8
appVersion: "0.1.2"
maintainers:
- name: ashwani-opstree
- name: tripathishikha1
- name: khushimalhoz

View File

@@ -1,45 +0,0 @@
# microservice
Basic helm chart for deploying microservices on kubernetes with best practices
![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.2](https://img.shields.io/badge/AppVersion-0.1.2-informational?style=flat-square)
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Ashwani Singh | <ashwani.singh@opstree.com> | |
| Shikha Tripathi | | |
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release microservice/
```
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| deployment | object | `{"affinity":{},"annotations":{},"environment":{},"image":{"name":"","pullPolicy":"IfNotPresent","tag":""},"livenessProbe":{"failureThreshold":5,"initialDelaySeconds":250,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"nodeSelector":{},"readinessProbe":{"failureThreshold":5,"initialDelaySeconds":30,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{},"tolerations":[],"volumeMounts":[],"volumes":{"configMaps":null,"enabled":true,"pvc":{"accessModes":["ReadWriteOnce"],"class":"default","enabled":false,"existing_claim":false,"mountPath":"/pv","name":"pvc","size":"1G"}}}` | Object that configures Deployment instance |
| deployment.image | object | `{"name":"","pullPolicy":"IfNotPresent","tag":""}` | Override default container image format |
| global | object | `{"environment":{},"fullnameOverride":"","imagePullSecrets":[],"nameOverride":"","namespace":"default","replicaCount":1}` | global variables |
| hpa.enabled | bool | `true` | |
| hpa.maxReplicas | int | `1` | |
| hpa.minReplicas | int | `1` | |
| hpa.targetCPU | int | `80` | |
| hpa.targetMemory | int | `80` | |
| kubeVersion | string | `""` | |
| service.annotations | object | `{}` | |
| service.specs[0].name | string | `"http"` | |
| service.specs[0].port | int | `80` | |
| service.type | string | `"ClusterIP"` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.automount | bool | `true` | |
| serviceAccount.create | bool | `false` | |
| serviceAccount.name | string | `""` | |
> **_NOTE:_** Please find the sample Helm values YAML in the example repository.

View File

@@ -1,22 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
{{ template "chart.maintainersSection" . }}
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install my-release microservice/
```
{{/* {{ template "chart.requirementsSection" . }} */}}
{{ template "chart.valuesSection" . }}
> **_NOTE:_** Please find the sample Helm values YAML in the example repository.
{{/* {{ template "helm-docs.versionFooter" . }} */}}

View File

@@ -1,2 +0,0 @@
name=opstree
address=opstreesolution

View File

@@ -1,43 +0,0 @@
global:
namespace: "demo-dev"
fullnameOverride: "webapp"
deployment:
image:
name: nginx
tag: latest
pullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: "/"
port: http
readinessProbe:
httpGet:
path: "/"
port: http
resources:
requests:
memory: 100Mi
cpu: 100m
limits:
memory: 500Mi
cpu: 500m
volumes:
enabled: true
configMaps:
- name: index
mountPath: /usr/share/nginx/html
data:
index.html: |
Hello! Opstree
topologySpreadConstraints:
whenUnsatisfiable: "DoNotSchedule"
# serviceAccount:
# create: true
# annotations: "aws arn link"
# serviceAccount:
# name: "myserviceaccount"

View File

@@ -1,5 +0,0 @@
You have deployed the following release: {{ include "microservice.fullname" . }}.
To get further information, you can run the commands:
$ helm status {{ include "microservice.fullname" . }}
$ helm get all {{ include "microservice.fullname" . }}

View File

@@ -1,36 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the target Kubernetes version
*/}}
{{- define "microservice.capabilities.kubeVersion" -}}
{{- default (default .Capabilities.KubeVersion.Version .Values.kubeVersion) ((.Values.global).kubeVersion) -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "microservice.capabilities.hpa.apiVersion" -}}
{{- $kubeVersion := include "microservice.capabilities.kubeVersion" .context -}}
{{- if and (not (empty $kubeVersion)) (semverCompare "<1.23-0" $kubeVersion) -}}
{{- if .beta2 -}}
{{- print "autoscaling/v2beta2" -}}
{{- else -}}
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- else -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "microservice.capabilities.deployment.apiVersion" -}}
{{- $kubeVersion := include "microservice.capabilities.kubeVersion" . -}}
{{- if and (not (empty $kubeVersion)) (semverCompare "<1.14-0" $kubeVersion) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
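For instance, the HPA template in this chart calls the first helper as `{{ include "microservice.capabilities.hpa.apiVersion" ( dict "context" $ ) }}`, so `.context` carries the root scope and `.beta2` is left unset.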

View File

@@ -1,54 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
It uses the release name to derive the app name.
*/}}
{{- define "microservice.name" -}}
{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "microservice.fullname" -}}
{{- if .Values.global.fullnameOverride -}}
{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.global.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "microservice.labels" -}}
app: {{ include "microservice.fullname" . }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "microservice.selectorLabels" -}}
app: {{ include "microservice.fullname" . }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "microservice.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "microservice.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,30 +0,0 @@
# ConfigMaps mounted as volumes
{{- if .Values.deployment.volumes.configMaps }}
{{- if .Values.deployment.volumes.enabled }}
{{ $header := .Values.deployment.volumes.configFileCommonHeader | default "" }}
{{ $root := . }}
{{ range $cm := .Values.deployment.volumes.configMaps}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "microservice.fullname" $root }}-{{ $cm.name }}-cm
namespace: {{ $root.Values.global.namespace | quote }}
data:
{{- if $cm.data }}
{{- range $filename, $content := $cm.data }}
# property-like keys; each key maps to a simple value
{{ $filename }}: |-
{{ $content | toString | indent 4}}
{{- end }}
{{- end }}
{{- if $cm.files }}
{{- range $file := $cm.files }}
{{ $file.destination }}: |
{{ $header | toString | indent 4 }}
{{ $root.Files.Get $file.source | indent 4 }}
{{- end}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
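A hypothetical values fragment exercising both the inline `data` path and the `files` path (assuming a `config.conf` shipped inside the chart, which `.Files.Get` reads):
```yaml
deployment:
  volumes:
    enabled: true
    configFileCommonHeader: |
      # managed by helm
    configMaps:
      - name: app-config
        mountPath: /etc/app
        data:
          app.properties: |
            log.level=info
        files:
          - source: config.conf            # path inside the chart
            destination: application.conf  # key in the rendered ConfigMap
```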

View File

@@ -1,139 +0,0 @@
{{ $root := . }}
---
apiVersion: {{ include "microservice.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
name: {{ include "microservice.fullname" . }}-app
namespace: {{ .Values.global.namespace | quote }}
{{- if .Values.deployment.annotations }}
annotations:
{{- range $key, $value := .Values.deployment.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
labels:
{{- include "microservice.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.global.replicaCount }}
{{- if .Values.deployment.strategy }}
strategy:
{{- toYaml .Values.deployment.strategy | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "microservice.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "microservice.selectorLabels" . | nindent 8 }}
{{- if .Values.deployment.podAnnotations }}
annotations:
{{- range $key, $value := .Values.deployment.podAnnotations }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.serviceAccount.create }}
serviceAccountName: {{ include "microservice.serviceAccountName" . }}-sa
{{- end }}
{{- if .Values.serviceAccount.name }}
serviceAccountName: {{ .Values.serviceAccount.name }}
{{- end }}
terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }}
containers:
- name: {{ include "microservice.fullname" . }}
image: "{{ .Values.deployment.image.name }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.pullPolicy }}
{{- if .Values.deployment.command }}
command: {{ .Values.deployment.command }}
{{- end }}
{{- if .Values.deployment.args }}
args: {{ .Values.deployment.args }}
{{- end }}
ports:
{{- range .Values.service.specs}}
- name: {{ .name }}
containerPort: {{ .targetPort | default .port}}
protocol: {{ .protocol | default "TCP" }}
{{- end }}
{{- if (merge .Values.global.environment .Values.deployment.environment) }}
env:
{{- range $name, $value := merge .Values.global.environment .Values.deployment.environment}}
- name: {{ $name | quote}}
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if and .Values.deployment.healthProbes.enabled .Values.deployment.livenessProbe.httpGet }}
livenessProbe:
{{- toYaml .Values.deployment.livenessProbe | nindent 12 }}
{{- end }}
{{- if and .Values.deployment.healthProbes.enabled .Values.deployment.readinessProbe.httpGet }}
readinessProbe:
{{- toYaml .Values.deployment.readinessProbe | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
{{- if .Values.deployment.volumes.enabled }}
volumeMounts:
{{- range $conf := .Values.deployment.volumes.configMaps }}
- mountPath: {{ $conf.mountPath }}
name: {{ include "microservice.fullname" $root }}-{{ $conf.name }}-cm
{{- end }}
{{- if .Values.deployment.volumes.pvc.enabled }}
- mountPath: {{ .Values.volumes.pvc.mountPath }}
name: {{ .Values.volumes.pvc.existing_claim | default .Values.volumes.pvc.name }}-volume
{{- end }}
{{- end }}
{{- if .Values.deployment.volumes.enabled }}
volumes:
{{- range $conf := .Values.deployment.volumes.configMaps }}
- name: {{ include "microservice.fullname" $root }}-{{ $conf.name }}-cm
configMap:
name: {{ include "microservice.fullname" $root }}-{{ $conf.name }}-cm
{{- end }}
{{- if .Values.deployment.volumes.pvc.enabled}}
- name: {{ .Values.deployment.volumes.pvc.existing_claim | default .Values.volumes.pvc.name }}-volume
persistentVolumeClaim:
claimName: {{ .Values.deployment.volumes.pvc.existing_claim | default .Values.volumes.pvc.name }}
{{- end}}
{{- end }}
{{- with .Values.deployment.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if and .Values.deployment.affinity.enabled (or .Values.deployment.affinity.preferred.enabled .Values.deployment.affinity.required.enabled) }}
affinity:
podAntiAffinity:
{{- if .Values.deployment.affinity.preferred.enabled }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app: {{ include "microservice.fullname" . }}
topologyKey: {{ .Values.deployment.affinity.topologyKey }}
{{- end }}
{{- if and .Values.deployment.affinity.required.enabled (not .Values.deployment.affinity.preferred.enabled) }}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ include "microservice.fullname" . }}
topologyKey: {{ .Values.deployment.affinity.topologyKey }}
{{- end }}
{{- end }}
{{- if .Values.deployment.topologySpreadConstraints.enabled }}
topologySpreadConstraints:
- maxSkew: 1
topologyKey: {{ .Values.deployment.topologySpreadConstraints.topologyKey }}
whenUnsatisfiable: "{{ .Values.deployment.topologySpreadConstraints.whenUnsatisfiable }}"
labelSelector:
matchLabels:
app: {{ include "microservice.fullname" . }}
{{- if ( eq .Values.deployment.topologySpreadConstraints.whenUnsatisfiable "DoNotSchedule")}}
minDomains: 2
{{- end }}
{{- end }}

View File

@@ -1,41 +0,0 @@
{{- if .Values.hpa.enabled }}
apiVersion: {{ include "microservice.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "microservice.fullname" . }}-hpa
namespace: {{ .Values.global.namespace | quote }}
labels:
{{- include "microservice.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: {{ include "microservice.capabilities.deployment.apiVersion" . }}
kind: Deployment
name: {{ include "microservice.fullname" . }}-app
minReplicas: {{ .Values.hpa.minReplicas }}
maxReplicas: {{ .Values.hpa.maxReplicas }}
metrics:
{{- if .Values.hpa.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" (include "microservice.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.hpa.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.hpa.targetMemory }}
{{- end }}
{{- end }}
{{- if .Values.hpa.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" (include "microservice.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.hpa.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.hpa.targetCPU }}
{{- end }}
{{- end }}
{{- end }}
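With the helper invoked as above (no `beta2` flag), a 1.22 cluster gets `autoscaling/v2beta1` metrics using `targetAverageUtilization`, while 1.23+ gets `autoscaling/v2` with a `target` block of type `Utilization`.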

View File

@@ -1,21 +0,0 @@
{{- if .Values.deployment.volumes.pvc.enabled }}
{{- if .Values.deployment.volumes.pvc.existing_claim -}}
{{- else -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.deployment.volumes.pvc.name }}
namespace: {{ .Values.global.namespace | quote }}
spec:
{{- if .Values.deployment.volumes.pvc.class }}
storageClassName: {{ .Values.deployment.volumes.pvc.class }}
{{- end }}
accessModes:
{{- range $accessMode := .Values.deployment.volumes.pvc.accessModes }}
- {{ $accessMode }}
{{- end }}
resources:
requests:
storage: {{ .Values.deployment.volumes.pvc.size }}
{{- end }}

View File

@ -1,34 +0,0 @@
{{- $root:= . }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "microservice.fullname" . }}-svc
namespace: {{ .Values.global.namespace | quote }}
{{- if .Values.service.annotations }}
annotations:
{{- range $key, $value := .Values.service.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
labels:
{{- include "microservice.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
selector:
{{- include "microservice.selectorLabels" . | nindent 4 }}
ports:
{{- range $spec := .Values.service.specs }}
- name: {{ $spec.name }}
port: {{ $spec.port }}
protocol: {{ $spec.protocol | default "TCP" }}
{{- if $spec.targetPort }}
targetPort: {{ $spec.targetPort }}
{{- else }}
targetPort: {{ $spec.name }}
{{- end }}
{{- if $spec.nodePort }}
nodePort: {{ $spec.nodePort }}
{{- end }}
{{- end -}}
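As a usage sketch (hypothetical values), a port spec that omits `targetPort` falls back to its own name, while `nodePort` is emitted only when set:
```yaml
service:
  type: NodePort
  specs:
    - name: http
      port: 80              # targetPort defaults to "http"
    - name: metrics
      port: 9090
      targetPort: 9091
      nodePort: 30990
```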

View File

@ -1,14 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "microservice.serviceAccountName" . }}-sa
namespace: {{ .Values.global.namespace | quote }}
labels:
{{- include "microservice.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@ -1,155 +0,0 @@
# -- global variables
global:
namespace: "default"
replicaCount: 1
nameOverride: ""
fullnameOverride: ""
imagePullSecrets: []
environment: {}
# list of key: value
# GLOBAL1: value
## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
# -- Object that configures Deployment instance
deployment:
# -- Override default container image format
image:
name: ""
tag: ""
pullPolicy: IfNotPresent
strategy: {}
# Annotation for the Deployment
annotations: {}
podAnnotations: {}
terminationGracePeriodSeconds: 60
healthProbes:
enabled: true
# livenessProbe: {}
livenessProbe:
# httpGet:
# path: "/"
# port: http
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
# readinessProbe: {}
readinessProbe:
# httpGet:
# path: "/"
# port: http
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
# command: ["/bin/sh","-c"]
# args: ["echo 'consuming a message'; sleep 5"]
environment: {}
# VAR1: value1
resources: {}
# resources:
# requests:
# memory: 100Mi
# cpu: 100m
# limits:
# memory: 100Mi
# cpu: 100m
# Additional volumes on the output Deployment definition.
volumes:
enabled: true
pvc:
enabled: false
existing_claim: false
name: pvc
mountPath: /pv
size: 1G
class: "default"
accessModes:
- ReadWriteOnce
# configFileCommonHeader: |
# line1
# line2
configMaps:
# - name: test
# mountPath: /test
# data:
# test.conf: |
# hello
# hello2
# - name: test-from-file
# mountPath: /test2
# files:
# - source: config.conf
# destination: application.conf
# - name: test-mixed
# mountPath: /test3
# data:
# test2.conf: |
# another hello
# files:
# - source: config.conf
# destination: application2.conf
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity:
enabled: true
preferred:
enabled: true
required:
enabled: false
topologyKey: "topology.kubernetes.io/zone"
topologySpreadConstraints:
enabled: true
# whenUnsatisfiable: "DoNotSchedule" OR "ScheduleAnyway"
whenUnsatisfiable: "ScheduleAnyway"
topologyKey: "topology.kubernetes.io/zone"
hpa:
enabled: true
minReplicas: 1
maxReplicas: 1
targetCPU: 80
targetMemory: 80
service:
type: ClusterIP
annotations: {}
specs:
- port: 80
name: http
serviceAccount:
create: false
automount: true
annotations: {}
name: ""

View File

@ -1,14 +0,0 @@
apiVersion: v2
name: otel-operator
description: A Helm chart for the OpenTelemetry Operator
type: application
version: 1.0.0
appVersion: 1.0.0
dependencies:
- name: opentelemetry-operator
version: 0.64.2
repository: https://open-telemetry.github.io/opentelemetry-helm-charts/
alias: operator
tags:
- operator
condition: operator.enabled

View File

@ -1,12 +0,0 @@
operator:
enabled: true
fullnameOverride: otel
manager:
collectorImage:
repository: otel/opentelemetry-collector-contrib
tag: latest
admissionWebhooks:
certManager:
enabled: false
autoGenerateCert:
enabled: true

View File

@ -1,65 +0,0 @@
apiVersion: v2
name: pga
description: A Helm chart for Prometheus, Grafana and Alertmanager
type: application
version: 1.0.3
appVersion: 1.0.1
maintainers:
- name: ashwani-opstree
dependencies:
- name: kube-prometheus-stack
version: 61.3.1
repository: https://prometheus-community.github.io/helm-charts/
alias: app
tags:
- monitoring
condition: app.enabled
- name: kube-prometheus-stack
version: 61.3.1
repository: https://prometheus-community.github.io/helm-charts/
alias: kube
tags:
- monitoring
condition: kube.enabled
- name: prometheus-adapter
version: 4.10.0
repository: https://prometheus-community.github.io/helm-charts/
tags:
- monitoring
alias: adapter
condition: adapter.enabled
- name: prometheus-pushgateway
version: 2.14.0
repository: https://prometheus-community.github.io/helm-charts/
tags:
- monitoring
alias: pushgateway
condition: pushgateway.enabled
- name: prometheus-blackbox-exporter
version: 8.17.0
repository: https://prometheus-community.github.io/helm-charts/
tags:
- blackbox
alias: blackbox
condition: blackbox.enabled
- name: thanos
version: 15.7.12
repository: https://charts.bitnami.com/bitnami
tags:
- thanos
alias: thanos
condition: thanos.enabled
- name: kubernetes-event-exporter
version: 3.2.10
repository: https://charts.bitnami.com/bitnami
alias: k8s-events
tags:
- monitoring
condition: k8s-events.enabled

View File

@ -1,36 +0,0 @@
# Prometheus Monitoring Setup with Helm
This document provides detailed instructions for setting up Prometheus monitoring in a Kubernetes cluster using Helm charts. Follow these commands to deploy Prometheus and its associated components.
## 1. Apply Custom Resource Definitions (CRDs)
Run the following commands to apply each CRD:
```bash
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagerconfigs.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheuses.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusrules.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-scrapeconfigs.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-servicemonitors.yaml
kubectl apply --server-side=true -f https://raw.githubusercontent.com/prometheus-community/helm-charts/kube-prometheus-stack-61.5.0/charts/kube-prometheus-stack/charts/crds/crds/crd-thanosrulers.yaml
```
## 2. Update Helm Chart Dependencies
```bash
helm dep update
```
Updates Helm chart dependencies.
## 3. Create a Namespace for Monitoring
```bash
kubectl create ns monitoring
```
Creates a Kubernetes namespace named monitoring.
## 4. Render chart templates locally and apply
```bash
helm template --name-template=monitoring . -n monitoring -f values.yaml | kubectl apply -f -
```
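## 5. Verify the Deployment
Optionally, confirm that the stack came up; the exact resource names depend on which components you enabled in values.yaml.
```bash
kubectl get pods -n monitoring
kubectl get prometheus,alertmanager -n monitoring
```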

View File

@ -1,52 +0,0 @@
global:
resolve_timeout: 5m
route:
group_wait: 30s
group_interval: 5m
repeat_interval: 30m
receiver: "null"
group_by:
- job
- alertname
- severity
routes:
- receiver: "null"
match:
alertname: Watchdog
- receiver: "alerts-infra"
group_wait: 10s
continue: true
match_re:
severity: warning|high
channel: slack
team: devops
receivers:
- name: "null"
# - name: "email"
# email_configs:
# - to: ''
# from: ''
# smarthost: ''
# auth_username: ''
# auth_password: ''
# require_tls: yes
# send_resolved: true
- name: "alerts-infra"
slack_configs:
- api_url: 'https://hooks.slack.com/services/'
send_resolved: true
channel: '#alerts-infra'
icon_url: https://avatars3.githubusercontent.com/u/3380462
title: '[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }}'
text: >-
{{ range .Alerts }}
*Alert:* {{ .Annotations.description }} - `{{ .Labels.severity }}`
*Description:* {{ .Annotations.description }}
*Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
*Details:*
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
{{ end }}
{{ end }}

View File

@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: alertmanager-
namespace: monitoring
commonLabels:
app: kube-alertmanager
release: kube
prometheus: kube
generatorOptions:
disableNameSuffixHash: true
secretGenerator:
- name: kube-alertmanager
files:
- config/alertmanager.yaml
type: Opaque

View File

@ -1,60 +0,0 @@
app:
enabled: false
kube:
enabled: true
grafana:
enabled: true
testFramework:
enabled: false
sidecar:
datasources:
defaultDatasourceEnabled: false
resources:
requests:
cpu: 1
memory: 2Gi
limits:
cpu: 1
memory: 2Gi
persistence:
enabled: true
type: sts
storageClassName: buildpiper-storage
accessModes:
- ReadWriteOnce
size: 1Gi
finalizers:
- kubernetes.io/pvc-protection
alertmanager:
enabled: false
prometheus:
enabled: true
prometheusSpec:
retention: 3d
resources:
requests:
cpu: 1
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: buildpiper-storage
resources:
requests:
storage: 20Gi
pushgateway:
enabled: false
blackbox:
enabled: false
adapter:
enabled: false
thanos:
enabled: false

View File

@ -1,48 +0,0 @@
app:
enabled: false
kube:
enabled: true
grafana:
enabled: true
testFramework:
enabled: false
sidecar:
datasources:
defaultDatasourceEnabled: false
alertmanager:
alertmanagerSpec:
storage:
volumeClaimTemplate:
spec:
storageClassName: buildpiper-storage
prometheus:
enabled: true
prometheusSpec:
retention: 7d
resources:
requests:
cpu: 1
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: buildpiper-storage
resources:
requests:
storage: 15Gi
pushgateway:
enabled: false
blackbox:
enabled: false
adapter:
enabled: false
thanos:
enabled: false

View File

@ -1,19 +0,0 @@
app:
enabled: false
kube:
enabled: true
grafana:
enabled: true
sidecar:
datasources:
defaultDatasourceEnabled: false
pushgateway:
enabled: false
blackbox:
enabled: false
adapter:
enabled: true

View File

@ -1,257 +0,0 @@
app:
enabled: false
kube:
enabled: true
fullnameOverride: kube
commonLabels:
prometheus: kube
defaultRules:
create: false
alertmanager:
enabled: true
alertmanagerSpec:
retention: 240h
resources:
requests:
cpu: 250m
memory: 500Mi
limits:
cpu: 250m
memory: 500Mi
storage:
volumeClaimTemplate:
spec:
# storageClassName: encrypted-gp3
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
grafana:
enabled: true
sidecar:
datasources:
defaultDatasourceEnabled: false
kubeApiServer:
enabled: true
kubelet:
enabled: true
namespace: kube-system
kubeControllerManager:
enabled: false
coreDns:
enabled: true
kubeEtcd:
enabled: false
kubeScheduler:
enabled: false
kubeProxy:
enabled: false
kubeStateMetrics:
enabled: true
kube-state-metrics:
customLabels:
prometheus: kube
enabled: true
podSecurityPolicy:
enabled: false
resources:
requests:
cpu: 250m
memory: 500Mi
limits:
cpu: 250m
memory: 500Mi
nodeExporter:
enabled: true
prometheus-node-exporter:
prometheus:
monitor:
additionalLabels:
prometheus: kube
# rbac:
# pspEnabled: false
# image:
# repository:
# tag: latest
# pullPolicy: Always
prometheusOperator:
enabled: true
admissionWebhooks:
enabled: false
deployment:
enabled: true
tls:
enabled: false
prometheus:
enabled: true
thanosService:
enabled: true
thanosServiceMonitor:
enabled: true
prometheusSpec:
externalLabels:
kubernetes_cluster: opstree
prometheus_cluster: kube
# get more details https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ThanosSpec
thanos:
version: 0.35.1
# image: quay.io/thanos/thanos:v0.35.1
blockSize: 5m
objectStorageConfig:
existingSecret:
key: objstore.yml
name: monitoring-thanos-objstore-secret
# nodeSelector:
# appType: monitoring
# tolerations:
# - key: "appType"
# operator: "Equal"
# value: "monitoring"
# effect: "NoSchedule"
# remoteWrite:
# - url: https://app.last9.io/jupiter/prometheus/write
# basicAuth:
# username:
# name: promsecret
# key: username
# password:
# name: promsecret
# key: password
## # Do not add the writeRelabelConfigs section if you want to
## # send all metrics via remote write
## writeRelabelConfigs:
# - sourceLabels: [ __name__ ]
# regex: 'istio*'
# action: keep
# image:
# tag: v2.41.0
retention: 1h
replicas: 2
# externalUrl: "http://kube-opstree.prod.internal/"
resources:
requests:
cpu: "500m"
memory: 500Mi
limits:
cpu: "500m"
memory: 500Mi
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: encrypted-gp3
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
serviceMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- kube
podMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- kube
ruleSelector:
matchLabels:
prometheus: kube
service:
name: kube-prometheus
pushgateway:
enabled: false
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
prometheus: app
extraArgs:
- --log.level=debug
- --push.disable-consistency-check
resources:
limits:
cpu: 1
memory: 4096Mi
requests:
cpu: 500m
memory: 4096Mi
blackbox:
enabled: false
serviceMonitor:
enabled: true
defaults:
additionalMetricsRelabels: {}
labels:
prometheus: app
interval: 30s
scrapeTimeout: 30s
module: http_2xx
config:
modules:
http_2xx:
prober: http
timeout: 5s
http:
valid_http_versions: [ "HTTP/1.0", "HTTP/1.1", "HTTP/2.0" ]
no_follow_redirects: false
preferred_ip_protocol: "ip4"
fail_if_ssl: false
fail_if_not_ssl: false
adapter:
enabled: false
thanos:
enabled: true
objstoreConfig: |-
type: s3
config:
bucket: thanos
endpoint: monitoring-minio.monitoring.svc.cluster.local:9000
access_key: minio
secret_key: minio123
insecure: true
query:
dnsDiscovery:
sidecarsService: kube-thanos-discovery
sidecarsNamespace: monitoring
bucketweb:
enabled: true
compactor:
enabled: false
storegateway:
enabled: true
ruler:
enabled: true
serviceMonitor:
namespace: monitoring
alertmanagers:
- http://kube-alertmanager.monitoring.svc.cluster.local:9093
config: |-
groups:
- name: "metamonitoring"
rules:
- alert: "PrometheusDown"
expr: absent(up{prometheus="monitoring/kube-prometheus"})
metrics:
enabled: true
serviceMonitor:
namespace: monitoring
enabled: true
minio:
enabled: true
auth:
rootPassword: minio123
rootUser: minio
monitoringBuckets: thanos
accessKey:
password: minio
secretKey:
password: minio123

View File

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
nameSuffix: -grafana-dashboard
resources:
- opentelemetry-apm

File diff suppressed because it is too large

View File

@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
generatorOptions:
labels:
grafana_dashboard: "1"
disableNameSuffixHash: true
annotations:
k8s-sidecar-target-directory: "/tmp/dashboards/otel-apm"
configMapGenerator:
- name: apm
files:
- apm.json

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-alertmanager-datasource
namespace: monitoring
labels:
grafana_datasource: "1"
app: kube-grafana
prometheus: kube
data:
kube-alertmanager.yaml: |-
apiVersion: 1
datasources:
- name: "kube-alertmanager"
type: alertmanager
uid: alertmanager
url: http://kube-alertmanager.monitoring:9093/
access: proxy
jsonData:
handleGrafanaManagedAlerts: false
implementation: prometheus

View File

@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- loki.yaml
- prometheus.yaml
- tempo.yaml
- alertmanager.yaml

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-loki-datasource
namespace: monitoring
labels:
grafana_datasource: "1"
app: kube-grafana
prometheus: kube
data:
kube-loki.yaml: |-
apiVersion: 1
datasources:
- uid: logging
orgId: 1
name: logging
type: loki
typeName: Loki
access: proxy
url: http://loki-logging-gateway.logging.svc
password: ''
user: ''
database: ''
basicAuth: false
isDefault: false
jsonData:
derivedFields:
- datasourceUid: tempo
matcherRegex: (?:trace_id)=(\w+)
name: TraceID
url: $${__value.raw}
readOnly: false
editable: true

View File

@ -1,27 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-prometheus-datasource
namespace: monitoring
labels:
grafana_datasource: "1"
app: kube-grafana
prometheus: kube
data:
kube-prometheus.yaml: |-
apiVersion: 1
datasources:
- name: "kube-prom"
type: prometheus
uid: prometheus
url: http://kube-prometheus.monitoring:9090/
access: proxy
isDefault: true
jsonData:
httpMethod: POST
timeInterval: 30s
exemplarTraceIdDestinations:
- datasourceUid: tempo
name: TraceID
readOnly: false
editable: true

View File

@ -1,34 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: tempo-datasource
namespace: monitoring
labels:
grafana_datasource: "1"
app: kube-grafana
prometheus: kube
data:
tempo.yaml: |-
apiVersion: 1
datasources:
- name: "tempo"
type: tempo
uid: tempo
url: http://tempo.observability.svc.cluster.local:3100/
access: proxy
jsonData:
handleGrafanaManagedAlerts: false
implementation: prometheus
nodeGraph:
enabled: true
search:
hide: false
lokiSearch:
datasourceUid: loki
tracesToLogs:
datasourceUid: loki
filterBySpanID: false
filterByTraceID: true
mapTagNamesEnabled: false
tags:
- app

View File

@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- thanos.yaml

View File

@ -1,27 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-thanos-datasource
namespace: monitoring
labels:
grafana_datasource: "1"
app: kube-grafana
prometheus: kube
data:
kube-thanos.yaml: |-
apiVersion: 1
datasources:
- name: "kube-thanos"
type: prometheus
uid: thanos
url: http://monitoring-thanos-query-frontend.monitoring:9090/
access: proxy
jsonData:
httpMethod: POST
timeInterval: 30s
exemplarTraceIdDestinations:
- datasourceUid: tempo
name: TraceID
readOnly: false
editable: true

View File

@ -1,359 +0,0 @@
app:
enabled: false
fullnameOverride: app
commonLabels:
prometheus: app
defaultRules:
create: false
alertmanager:
enabled: false
grafana:
enabled: false
kubeApiServer:
enabled: false
kubelet:
enabled: false
kubeControllerManager:
enabled: false
coreDns:
enabled: false
kubeEtcd:
enabled: false
kubeScheduler:
enabled: false
kubeProxy:
enabled: false
kubeStateMetrics:
enabled: false
kube-state-metrics:
enabled: false
nodeExporter:
enabled: false
prometheusOperator:
enabled: false
admissionWebhooks:
enabled: false
configReloaderCpu: 300m
configReloaderMemory: 300Mi
prometheus:
enabled: true
prometheusSpec:
# nodeSelector:
# appType: monitoring
# tolerations:
# - key: "appType"
# operator: "Equal"
# value: "monitoring"
# effect: "NoSchedule"
retention: 30d
replicas: 1
# externalUrl: ""
resources:
requests:
cpu: "1"
memory: 1Gi
limits:
cpu: "1"
memory: 1Gi
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: encrypted-gp3
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
alertingEndpoints:
- name: kube-alertmanager
namespace: monitoring
port: web
pathPrefix: /
apiVersion: v2
serviceMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- app
podMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- app
ruleSelector:
matchLabels:
prometheus: app
additionalScrapeConfigs:
- job_name: kubernetes-services-probe
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: service
scrape_interval: 30s
scrape_timeout: 25s
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_probe
regex: true
action: keep
- source_labels:
- __meta_kubernetes_service_name
target_label: service
- source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_path
regex: (.+);(.+)
target_label: __param_target
replacement: ${1}${2}
- source_labels:
- __param_target
target_label: instance
- source_labels: []
target_label: __address__
replacement: monitoring-prometheus-blackbox-exporter:9115
service:
name: app-prometheus
kube:
enabled: true
fullnameOverride: kube
commonLabels:
prometheus: kube
defaultRules:
create: false
alertmanager:
enabled: true
alertmanagerSpec:
retention: 240h
resources:
requests:
cpu: 250m
memory: 500Mi
limits:
cpu: 250m
memory: 500Mi
storage:
volumeClaimTemplate:
spec:
# storageClassName: encrypted-gp3
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
grafana:
enabled: true
testFramework:
enabled: false
sidecar:
datasources:
defaultDatasourceEnabled: false
kubeApiServer:
enabled: true
kubelet:
enabled: true
namespace: kube-system
kubeControllerManager:
enabled: false
coreDns:
enabled: true
kubeEtcd:
enabled: false
kubeScheduler:
enabled: false
kubeProxy:
enabled: false
kubeStateMetrics:
enabled: true
kube-state-metrics:
customLabels:
prometheus: kube
enabled: true
podSecurityPolicy:
enabled: false
resources:
requests:
cpu: 250m
memory: 500Mi
limits:
cpu: 250m
memory: 500Mi
nodeExporter:
enabled: true
prometheus-node-exporter:
prometheus:
monitor:
additionalLabels:
prometheus: kube
# rbac:
# pspEnabled: false
# image:
# repository:
# tag: latest
# pullPolicy: Always
prometheusOperator:
enabled: true
admissionWebhooks:
enabled: false
deployment:
enabled: true
tls:
enabled: false
prometheus:
enabled: true
prometheusSpec:
# nodeSelector:
# appType: monitoring
# tolerations:
# - key: "appType"
# operator: "Equal"
# value: "monitoring"
# effect: "NoSchedule"
# remoteWrite:
# - url: https://app.last9.io/jupiter/prometheus/write
# basicAuth:
# username:
# name: promsecret
# key: username
# password:
# name: promsecret
# key: password
## # Do not add the writeRelabelConfigs section if you want to
## # send all metrics via remote write
## writeRelabelConfigs:
# - sourceLabels: [ __name__ ]
# regex: 'istio*'
# action: keep
# image:
# tag: v2.41.0
retention: 30d
replicas: 1
# externalUrl: "http://kube-opstree.prod.internal/"
resources:
requests:
cpu: "500m"
memory: 500Mi
limits:
cpu: "500m"
memory: 500Mi
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: encrypted-gp3
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
serviceMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- kube
podMonitorSelector:
matchExpressions:
- key: prometheus
operator: In
values:
- kube
ruleSelector:
matchLabels:
prometheus: kube
service:
name: kube-prometheus
pushgateway:
enabled: false
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
prometheus: app
extraArgs:
- --log.level=debug
- --push.disable-consistency-check
resources:
limits:
cpu: 1
memory: 4096Mi
requests:
cpu: 500m
memory: 4096Mi
blackbox:
enabled: false
serviceMonitor:
enabled: true
defaults:
additionalMetricsRelabels: {}
labels:
prometheus: app
interval: 30s
scrapeTimeout: 30s
module: http_2xx
config:
modules:
http_2xx:
prober: http
timeout: 5s
http:
valid_http_versions:
- "HTTP/1.0"
- "HTTP/1.1"
- "HTTP/2.0"
no_follow_redirects: false
preferred_ip_protocol: "ip4"
fail_if_ssl: false
fail_if_not_ssl: false
adapter:
enabled: false
k8s-events:
enabled: true
serviceAccount:
create: false
metrics:
enabled: true
serviceMonitor:
enabled: true
labels:
prometheus: kube
release: monitoring
config:
logLevel: debug
logFormat: json
receivers:
- name: "loki"
loki:
url: http://logging-loki-gateway.logging.svc.cluster.local/loki/api/v1/push
layout:
message: "{{ .msg }}"
reason: "{{ .Reason }}"
type: "{{ .Type }}"
count: "{{ .Count }}"
kind: "{{ .InvolvedObject.Kind }}"
name: "{{ .InvolvedObject.Name }}"
namespace: "{{ .Namespace }}"
component: "{{ .Source.Component }}"
host: "{{ .Source.Host }}"
route:
routes:
- match:
- receiver: "loki"
rbac:
rules:
- apiGroups: [metrics.k8s.io]
resources: [pods, nodes]
verbs: [get, list, watch]
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "watch", "list"]
thanos:
enabled: false

View File

@ -1,21 +0,0 @@
apiVersion: v2
name: psmdb-operator-db
description: A Helm chart for Percona Operator and Percona Server for MongoDB
type: application
version: 1.0.0
appVersion: 1.0.0
dependencies:
- name: psmdb-operator
version: 1.18.0
repository: https://percona.github.io/percona-helm-charts/
alias: psmdb-operator
tags:
- psmdb-operator
condition: psmdb-operator.enabled
- name: psmdb-db
version: 1.18.0
repository: https://percona.github.io/percona-helm-charts/
alias: psmdb-db
tags:
- psmdb-db
condition: psmdb-db.enabled

View File

@ -1,2 +0,0 @@
Backup and restore have been tested against Azure Blob Storage using the backup.yaml and restore.yaml files respectively.
To use cloud storage for backups, a Kubernetes secret needs to be created (see the sketch below): https://docs.percona.com/percona-operator-for-mongodb/backup-tutorial.html#configure-backup-storage
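As a minimal sketch for Azure Blob Storage (all names here are placeholders), the secret carries the storage account credentials and must match the `credentialsSecret` referenced under `backup.storages` in your values:
```bash
kubectl create secret generic my-azure-backup-secret \
  --namespace my-namespace \
  --from-literal=AZURE_STORAGE_ACCOUNT_NAME=<storage-account-name> \
  --from-literal=AZURE_STORAGE_ACCOUNT_KEY=<storage-account-key>
```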

View File

@ -1,266 +0,0 @@
# Percona Server for MongoDB
This chart deploys Percona Operator and Percona Server for MongoDB Cluster on Kubernetes controlled by Percona Operator for MongoDB.
Useful links:
- [Operator Github repository](https://github.com/percona/percona-server-mongodb-operator)
- [Operator Documentation](https://www.percona.com/doc/kubernetes-operator-for-psmongodb/index.html)
## Pre-requisites
* Kubernetes 1.26+
* Helm v3
# Chart Details
This chart will deploy the Operator Pod and Percona Server for MongoDB Cluster in Kubernetes. It will create a Custom Resource, and the Operator will trigger the creation of corresponding Kubernetes primitives: StatefulSets, Pods, Secrets, etc.
## Installing the Chart
To install the chart with the `my-db` release name in a dedicated namespace (recommended):
```sh
helm dependency build
helm install my-db <path-to-chart> --namespace my-namespace
```
The chart can be customized using the following configurable parameters:
| Parameter | Description | Default |
| ------------------------------- | ------------------------------------------------------------------------------|---------------------------------------|
| `crVersion` | CR Cluster Manifest version | `1.16.2` |
| `pause` | Stop PSMDB Database safely | `false` |
| `unmanaged` | Start cluster and don't manage it (cross cluster replication) | `false` |
| `unsafeFlags.tls`               | Allows configuring a cluster without TLS/SSL certificates | `false` |
| `unsafeFlags.replsetSize`       | Allows configuring a cluster with unsafe parameters: starting it with fewer than 3 replica set instances or with an even number of replica set instances without an additional arbiter | `false` |
| `unsafeFlags.mongosSize`        | Allows configuring a sharded cluster with fewer than 3 config server Pods or fewer than 2 mongos Pods | `false` |
| `unsafeFlags.terminationGracePeriod` | Allows configuring a sharded cluster without a termination grace period for the replica set | `false` |
| `unsafeFlags.backupIfUnhealthy` | Allows running backup on a cluster with failed health checks | `false` |
| `clusterServiceDNSSuffix` | The (non-standard) cluster domain to be used as a suffix of the Service name | `""` |
| `clusterServiceDNSMode` | Mode for the cluster service dns (Internal/ServiceMesh) | `""` |
| `annotations` | PSMDB custom resource annotations | `{}` |
| `ignoreAnnotations` | The list of annotations to be ignored by the Operator | `[]` |
| `ignoreLabels` | The list of labels to be ignored by the Operator | `[]` |
| `multiCluster.enabled` | Enable Multi Cluster Services (MCS) cluster mode | `false` |
| `multiCluster.DNSSuffix` | The cluster domain to be used as a suffix for multi-cluster Services used by Kubernetes | `""` |
| `updateStrategy` | Regulates the way how PSMDB Cluster Pods will be updated after setting a new image | `SmartUpdate` |
| `upgradeOptions.versionServiceEndpoint` | Endpoint for actual PSMDB Versions provider | `https://check.percona.com/versions/` |
| `upgradeOptions.apply` | PSMDB image to apply from version service - recommended, latest, actual version like 4.4.2-4 | `disabled` |
| `upgradeOptions.schedule` | Cron formatted time to execute the update | `"0 2 * * *"` |
| `upgradeOptions.setFCV` | Set feature compatibility version on major upgrade | `false` |
| `finalizers:delete-psmdb-pvc` | Set this if you want to delete database persistent volumes on cluster deletion | `[]` |
| `finalizers:delete-psmdb-pods-in-order` | Set this if you want to delete PSMDB pods in order (primary last) | `[]` |
| `image.repository` | PSMDB Container image repository | `percona/percona-server-mongodb` |
| `image.tag` | PSMDB Container image tag | `6.0.9-7` |
| `imagePullPolicy` | The policy used to update images | `Always` |
| `imagePullSecrets` | PSMDB Container pull secret | `[]` |
| `initImage.repository` | Repository for custom init image | `""` |
| `initImage.tag` | Tag for custom init image | `""` |
| `initContainerSecurityContext` | A custom Kubernetes Security Context for a Container for the initImage | `{}` |
| `tls.mode` | Control usage of TLS (allowTLS, preferTLS, requireTLS, disabled) | `preferTLS` |
| `tls.certValidityDuration` | The validity duration of the external certificate for cert manager | `""` |
| `tls.allowInvalidCertificates` | If enabled the mongo shell will not attempt to validate the server certificates | `true` |
| `tls.issuerConf.name` | A cert-manager issuer name | `""` |
| `tls.issuerConf.kind` | A cert-manager issuer kind | `""` |
| `tls.issuerConf.group` | A cert-manager issuer group | `""` |
| `secrets.users` | The name of the Secrets object for the MongoDB users required to run the operator | `""` |
| `secrets.encryptionKey` | Set secret for data at rest encryption key | `""` |
| `secrets.vault` | Specifies a secret object to provide integration with HashiCorp Vault | `""` |
| `secrets.ldapSecret` | Specifies a secret object for LDAP over TLS connection between MongoDB and OpenLDAP server | `""` |
| `secrets.sse` | The name of the Secrets object for server side encryption credentials | `""` |
| `secrets.ssl` | A secret with TLS certificate generated for external communications | `""` |
| `secrets.sslInternal` | A secret with TLS certificate generated for internal communications | `""` |
| `pmm.enabled` | Enable integration with [Percona Monitoring and Management software](https://www.percona.com/blog/2020/07/23/using-percona-kubernetes-operators-with-percona-monitoring-and-management/) | `false` |
| `pmm.image.repository` | PMM Container image repository | `percona/pmm-client` |
| `pmm.image.tag` | PMM Container image tag | `2.41.2` |
| `pmm.serverHost` | PMM server related K8S service hostname | `monitoring-service` |
| | | |
| `replsets.rs0.name` | ReplicaSet name | `rs0` |
| `replsets.rs0.size` | ReplicaSet size (pod quantity) | `3` |
| `replsets.rs0.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean replica set Pods termination | `""` |
| `replsets.rs0.externalNodes` | ReplicaSet external nodes (cross cluster replication) | `[]` |
| `replsets.rs0.configuration` | Custom config for mongod in replica set | `""` |
| `replsets.rs0.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` |
| `replsets.rs0.serviceAccountName` | Run replicaset Containers under specified K8S SA | `""` |
| `replsets.rs0.affinity.antiAffinityTopologyKey` | ReplicaSet Pod affinity | `kubernetes.io/hostname` |
| `replsets.rs0.affinity.advanced` | ReplicaSet Pod advanced affinity | `{}` |
| `replsets.rs0.tolerations` | ReplicaSet Pod tolerations | `[]` |
| `replsets.rs0.priorityClass` | ReplicaSet Pod priorityClassName | `""` |
| `replsets.rs0.annotations` | ReplicaSet Pod annotations | `{}` |
| `replsets.rs0.labels` | ReplicaSet Pod labels | `{}` |
| `replsets.rs0.nodeSelector` | ReplicaSet Pod nodeSelector labels | `{}` |
| `replsets.rs0.livenessProbe` | ReplicaSet Pod livenessProbe structure | `{}` |
| `replsets.rs0.readinessProbe` | ReplicaSet Pod readinessProbe structure | `{}` |
| `replsets.rs0.storage` | Set cacheSizeRatio or other custom MongoDB storage options | `{}` |
| `replsets.rs0.podSecurityContext` | Set the security context for a Pod | `{}` |
| `replsets.rs0.containerSecurityContext` | Set the security context for a Container | `{}` |
| `replsets.rs0.runtimeClass` | ReplicaSet Pod runtimeClassName | `""` |
| `replsets.rs0.sidecars` | ReplicaSet Pod sidecars | `{}` |
| `replsets.rs0.sidecarVolumes` | ReplicaSet Pod sidecar volumes | `[]` |
| `replsets.rs0.sidecarPVCs` | ReplicaSet Pod sidecar PVCs | `[]` |
| `replsets.rs0.podDisruptionBudget.maxUnavailable` | ReplicaSet failed Pods maximum quantity | `1` |
| `replsets.rs0.splitHorizons` | External URI for Split-horizon for replica set Pods of the exposed cluster | `{}` |
| `replsets.rs0.expose.enabled` | Allow access to replicaSet from outside of Kubernetes | `false` |
| `replsets.rs0.expose.exposeType` | Network service access point type | `ClusterIP` |
| `replsets.rs0.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` |
| `replsets.rs0.expose.serviceAnnotations` | ReplicaSet service annotations | `{}` |
| `replsets.rs0.expose.serviceLabels` | ReplicaSet service labels | `{}` |
| `replsets.rs0.schedulerName` | ReplicaSet Pod schedulerName | `""` |
| `replsets.rs0.resources` | ReplicaSet Pods resource requests and limits | `{}` |
| `replsets.rs0.volumeSpec` | ReplicaSet Pods storage resources | `{}` |
| `replsets.rs0.volumeSpec.emptyDir` | ReplicaSet Pods emptyDir K8S storage | `{}` |
| `replsets.rs0.volumeSpec.hostPath` | ReplicaSet Pods hostPath K8S storage | |
| `replsets.rs0.volumeSpec.hostPath.path` | ReplicaSet Pods hostPath K8S storage path | `""` |
| `replsets.rs0.volumeSpec.hostPath.type` | Type for hostPath volume | `Directory` |
| `replsets.rs0.volumeSpec.pvc` | ReplicaSet Pods PVC request parameters | |
| `replsets.rs0.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` |
| `replsets.rs0.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` |
| `replsets.rs0.volumeSpec.pvc.storageClassName` | ReplicaSet Pods PVC target storageClass | `""` |
| `replsets.rs0.volumeSpec.pvc.accessModes` | ReplicaSet Pods PVC access policy | `[]` |
| `replsets.rs0.volumeSpec.pvc.resources.requests.storage` | ReplicaSet Pods PVC storage size | `3Gi` |
| `replsets.rs0.hostAliases` | The IP address for Kubernetes host aliases | `[]` |
| `replsets.rs0.nonvoting.enabled` | Add MongoDB nonvoting Pods | `false` |
| `replsets.rs0.nonvoting.podSecurityContext` | Set the security context for a Pod | `{}` |
| `replsets.rs0.nonvoting.containerSecurityContext` | Set the security context for a Container | `{}` |
| `replsets.rs0.nonvoting.size` | Number of nonvoting Pods | `1` |
| `replsets.rs0.nonvoting.configuration` | Custom config for mongod nonvoting member | `""` |
| `replsets.rs0.nonvoting.serviceAccountName` | Run replicaset nonvoting Container under specified K8S SA | `""` |
| `replsets.rs0.nonvoting.affinity.antiAffinityTopologyKey` | Nonvoting Pods affinity | `kubernetes.io/hostname` |
| `replsets.rs0.nonvoting.affinity.advanced` | Nonvoting Pods advanced affinity | `{}` |
| `replsets.rs0.nonvoting.tolerations` | Nonvoting Pod tolerations | `[]` |
| `replsets.rs0.nonvoting.priorityClass` | Nonvoting Pod priorityClassName | `""` |
| `replsets.rs0.nonvoting.annotations` | Nonvoting Pod annotations | `{}` |
| `replsets.rs0.nonvoting.labels` | Nonvoting Pod labels | `{}` |
| `replsets.rs0.nonvoting.nodeSelector` | Nonvoting Pod nodeSelector labels | `{}` |
| `replsets.rs0.nonvoting.podDisruptionBudget.maxUnavailable` | Nonvoting failed Pods maximum quantity | `1` |
| `replsets.rs0.nonvoting.resources` | Nonvoting Pods resource requests and limits | `{}` |
| `replsets.rs0.nonvoting.volumeSpec` | Nonvoting Pods storage resources | `{}` |
| `replsets.rs0.nonvoting.volumeSpec.emptyDir` | Nonvoting Pods emptyDir K8S storage | `{}` |
| `replsets.rs0.nonvoting.volumeSpec.hostPath` | Nonvoting Pods hostPath K8S storage | |
| `replsets.rs0.nonvoting.volumeSpec.hostPath.path` | Nonvoting Pods hostPath K8S storage path | `""` |
| `replsets.rs0.nonvoting.volumeSpec.hostPath.type` | Type for hostPath volume | `Directory` |
| `replsets.rs0.nonvoting.volumeSpec.pvc` | Nonvoting Pods PVC request parameters | |
| `replsets.rs0.nonvoting.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` |
| `replsets.rs0.nonvoting.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` |
| `replsets.rs0.nonvoting.volumeSpec.pvc.storageClassName` | Nonvoting Pods PVC target storageClass | `""` |
| `replsets.rs0.nonvoting.volumeSpec.pvc.accessModes` | Nonvoting Pods PVC access policy | `[]` |
| `replsets.rs0.nonvoting.volumeSpec.pvc.resources.requests.storage` | Nonvoting Pods PVC storage size | `3Gi` |
| `replsets.rs0.arbiter.enabled` | Create MongoDB arbiter service | `false` |
| `replsets.rs0.arbiter.size` | MongoDB arbiter Pod quantity | `1` |
| `replsets.rs0.arbiter.serviceAccountName` | Run replicaset arbiter Container under specified K8S SA | `""` |
| `replsets.rs0.arbiter.affinity.antiAffinityTopologyKey` | MongoDB arbiter Pod affinity | `kubernetes.io/hostname` |
| `replsets.rs0.arbiter.affinity.advanced` | MongoDB arbiter Pod advanced affinity | `{}` |
| `replsets.rs0.arbiter.tolerations` | MongoDB arbiter Pod tolerations | `[]` |
| `replsets.rs0.arbiter.priorityClass` | MongoDB arbiter priorityClassName | `""` |
| `replsets.rs0.arbiter.annotations` | MongoDB arbiter Pod annotations | `{}` |
| `replsets.rs0.arbiter.labels` | MongoDB arbiter Pod labels | `{}` |
| `replsets.rs0.arbiter.nodeSelector` | MongoDB arbiter Pod nodeSelector labels | `{}` |
| | | |
| `sharding.enabled` | Enable sharding setup | `true` |
| `sharding.balancer.enabled` | Enable/disable balancer | `true` |
| `sharding.configrs.size` | Config ReplicaSet size (pod quantity) | `3` |
| `sharding.configrs.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean replica set Pods termination | `""` |
| `sharding.configrs.externalNodes` | Config ReplicaSet external nodes (cross cluster replication) | `[]` |
| `sharding.configrs.configuration` | Custom config for mongod in config replica set | `""` |
| `sharding.configrs.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` |
| `sharding.configrs.serviceAccountName` | Run sharding configrs Containers under specified K8S SA | `""` |
| `sharding.configrs.affinity.antiAffinityTopologyKey` | Config ReplicaSet Pod affinity | `kubernetes.io/hostname` |
| `sharding.configrs.affinity.advanced` | Config ReplicaSet Pod advanced affinity | `{}` |
| `sharding.configrs.tolerations` | Config ReplicaSet Pod tolerations | `[]` |
| `sharding.configrs.priorityClass` | Config ReplicaSet Pod priorityClassName | `""` |
| `sharding.configrs.annotations` | Config ReplicaSet Pod annotations | `{}` |
| `sharding.configrs.labels` | Config ReplicaSet Pod labels | `{}` |
| `sharding.configrs.nodeSelector` | Config ReplicaSet Pod nodeSelector labels | `{}` |
| `sharding.configrs.livenessProbe` | Config ReplicaSet Pod livenessProbe structure | `{}` |
| `sharding.configrs.readinessProbe` | Config ReplicaSet Pod readinessProbe structure | `{}` |
| `sharding.configrs.storage` | Set cacheSizeRatio or other custom MongoDB storage options | `{}` |
| `sharding.configrs.podSecurityContext` | Set the security context for a Pod | `{}` |
| `sharding.configrs.containerSecurityContext` | Set the security context for a Container | `{}` |
| `sharding.configrs.runtimeClass` | Config ReplicaSet Pod runtimeClassName | `""` |
| `sharding.configrs.sidecars` | Config ReplicaSet Pod sidecars | `{}` |
| `sharding.configrs.sidecarVolumes` | Config ReplicaSet Pod sidecar volumes | `[]` |
| `sharding.configrs.sidecarPVCs` | Config ReplicaSet Pod sidecar PVCs | `[]` |
| `sharding.configrs.podDisruptionBudget.maxUnavailable` | Config ReplicaSet failed Pods maximum quantity | `1` |
| `sharding.configrs.expose.enabled` | Allow access to cfg replica from outside of Kubernetes | `false` |
| `sharding.configrs.expose.exposeType` | Network service access point type | `ClusterIP` |
| `sharding.configrs.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` |
| `sharding.configrs.expose.serviceAnnotations` | Config ReplicaSet service annotations | `{}` |
| `sharding.configrs.expose.serviceLabels` | Config ReplicaSet service labels | `{}` |
| `sharding.configrs.resources.limits.cpu` | Config ReplicaSet resource limits CPU | `300m` |
| `sharding.configrs.resources.limits.memory` | Config ReplicaSet resource limits memory | `0.5G` |
| `sharding.configrs.resources.requests.cpu` | Config ReplicaSet resource requests CPU | `300m` |
| `sharding.configrs.resources.requests.memory` | Config ReplicaSet resource requests memory | `0.5G` |
| `sharding.configrs.volumeSpec.hostPath` | Config ReplicaSet hostPath K8S storage | |
| `sharding.configrs.volumeSpec.hostPath.path` | Config ReplicaSet hostPath K8S storage path | `""` |
| `sharding.configrs.volumeSpec.hostPath.type` | Type for hostPath volume | `Directory` |
| `sharding.configrs.volumeSpec.emptyDir` | Config ReplicaSet Pods emptyDir K8S storage | |
| `sharding.configrs.volumeSpec.pvc` | Config ReplicaSet Pods PVC request parameters | |
| `sharding.configrs.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` |
| `sharding.configrs.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` |
| `sharding.configrs.volumeSpec.pvc.storageClassName` | Config ReplicaSet Pods PVC storageClass | `""` |
| `sharding.configrs.volumeSpec.pvc.accessModes` | Config ReplicaSet Pods PVC access policy | `[]` |
| `sharding.configrs.volumeSpec.pvc.resources.requests.storage` | Config ReplicaSet Pods PVC storage size | `3Gi` |
| `sharding.configrs.hostAliases` | The IP address for Kubernetes host aliases | `[]` |
| `sharding.mongos.size` | Mongos size (pod quantity) | `3` |
| `sharding.mongos.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean mongos Pods termination | `""` |
| `sharding.mongos.configuration` | Custom config for mongos | `""` |
| `sharding.mongos.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` |
| `sharding.mongos.serviceAccountName` | Run sharding mongos Containers under specified K8S SA | `""` |
| `sharding.mongos.affinity.antiAffinityTopologyKey` | Mongos Pods affinity | `kubernetes.io/hostname` |
| `sharding.mongos.affinity.advanced` | Mongos Pods advanced affinity | `{}` |
| `sharding.mongos.tolerations` | Mongos Pods tolerations | `[]` |
| `sharding.mongos.priorityClass` | Mongos Pods priorityClassName | `""` |
| `sharding.mongos.annotations` | Mongos Pods annotations | `{}` |
| `sharding.mongos.labels` | Mongos Pods labels | `{}` |
| `sharding.mongos.nodeSelector` | Mongos Pods nodeSelector labels | `{}` |
| `sharding.mongos.livenessProbe` | Mongos Pod livenessProbe structure | `{}` |
| `sharding.mongos.readinessProbe` | Mongos Pod readinessProbe structure | `{}` |
| `sharding.mongos.podSecurityContext` | Set the security context for a Pod | `{}` |
| `sharding.mongos.containerSecurityContext` | Set the security context for a Container | `{}` |
| `sharding.mongos.runtimeClass` | Mongos Pod runtimeClassName | `""` |
| `sharding.mongos.sidecars` | Mongos Pod sidecars | `{}` |
| `sharding.mongos.sidecarVolumes` | Mongos Pod sidecar volumes | `[]` |
| `sharding.mongos.sidecarPVCs` | Mongos Pod sidecar PVCs | `[]` |
| `sharding.mongos.podDisruptionBudget.maxUnavailable` | Mongos failed Pods maximum quantity | `1` |
| `sharding.mongos.resources.limits.cpu` | Mongos Pods resource limits CPU | `300m` |
| `sharding.mongos.resources.limits.memory` | Mongos Pods resource limits memory | `0.5G` |
| `sharding.mongos.resources.requests.cpu` | Mongos Pods resource requests CPU | `300m` |
| `sharding.mongos.resources.requests.memory` | Mongos Pods resource requests memory | `0.5G` |
| `sharding.mongos.expose.exposeType` | Mongos service exposeType | `ClusterIP` |
| `sharding.mongos.expose.servicePerPod` | Create a separate ClusterIP Service for each mongos instance | `false` |
| `sharding.mongos.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` |
| `sharding.mongos.expose.serviceAnnotations` | Mongos service annotations | `{}` |
| `sharding.mongos.expose.serviceLabels` | Mongos service labels | `{}` |
| `sharding.mongos.expose.nodePort` | Custom port if exposing mongos via NodePort | `""` |
| `sharding.mongos.hostAliases` | The IP address for Kubernetes host aliases | `[]` |
| | | |
| `backup.enabled` | Enable backup PBM agent | `true` |
| `backup.annotations` | Backup job annotations | `{}` |
| `backup.podSecurityContext` | Set the security context for a Pod | `{}` |
| `backup.containerSecurityContext` | Set the security context for a Container | `{}` |
| `backup.restartOnFailure` | Backup Pods restart policy | `true` |
| `backup.image.repository` | PBM Container image repository | `percona/percona-backup-mongodb` |
| `backup.image.tag` | PBM Container image tag | `2.3.0` |
| `backup.storages` | Local/remote backup storages settings | `{}` |
| `backup.pitr.enabled` | Enable point in time recovery for backup | `false` |
| `backup.pitr.oplogOnly` | Start collecting oplogs even if full logical backup doesn't exist | `false` |
| `backup.pitr.oplogSpanMin` | Number of minutes between the uploads of oplogs | `10` |
| `backup.pitr.compressionType` | The point-in-time-recovery chunks compression format | `""` |
| `backup.pitr.compressionLevel` | The point-in-time-recovery chunks compression level | `""` |
| `backup.configuration.backupOptions` | Custom configuration settings for backup | `{}` |
| `backup.configuration.restoreOptions` | Custom configuration settings for restore | `{}` |
| `backup.tasks` | Backup working schedule | `{}` |
| `users` | PSMDB essential users | `{}` |
Specify parameters using the `--set key=value[,key=value]` argument to `helm install`.
Notice that you can use multiple replica sets only with sharding enabled.
## Examples
### Deploy a replica set with disabled backups and no mongos pods
This is great for a dev PSMDB/MongoDB cluster as it doesn't bother with backups and sharding setup.
```bash
$ helm install dev --namespace psmdb . \
--set runUid=1001 --set "replsets.rs0.volumeSpec.pvc.resources.requests.storage=20Gi" \
--set backup.enabled=false --set sharding.enabled=false
```

View File

@ -1,18 +0,0 @@
{{- if .Values.backup.enabled }}
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBBackup
metadata:
name: {{ .Values.backup.name }}
{{- if .Values.backup.annotations }}
annotations:
{{ .Values.backup.annotations | toYaml | indent 4 }}
{{- end }}
{{- if .Values.backup.labels }}
labels:
{{ .Values.backup.labels | toYaml | indent 4 }}
{{- end }}
spec:
clusterName: {{ .Values.backup.clusterName }}
storageName: {{ .Values.backup.storageName }}
type: {{ .Values.backup.type }}
{{- end }}

View File

@ -1,17 +0,0 @@
{{- if .Values.restore.enabled }}
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
name: {{ .Values.restore.name }}
{{- if .Values.restore.annotations }}
annotations:
{{ .Values.restore.annotations | toYaml | indent 4 }}
{{- end }}
{{- if .Values.restore.labels }}
labels:
{{ .Values.restore.labels | toYaml | indent 4 }}
{{- end }}
spec:
clusterName: {{ .Values.restore.clusterName }}
backupName: {{ .Values.restore.backupName }}
{{- end }}

View File

@ -1,805 +0,0 @@
psmdb-operator:
enabled: true
# Default values for psmdb-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: percona/percona-server-mongodb-operator
tag: 1.18.0
pullPolicy: IfNotPresent
# disableTelemetry: according to
# https://docs.percona.com/percona-operator-for-mongodb/telemetry.html
# this is how you can disable telemetry collection
# default is false which means telemetry will be collected
disableTelemetry: false
# set if you want to specify a namespace to watch
# defaults to `.Release.namespace` if left blank
# multiple namespaces can be specified and separated by comma
# watchNamespace:
# set if you want that watched namespaces are created by helm
# createNamespace: false
# set if operator should be deployed in cluster wide mode. defaults to false
watchAllNamespaces: false
# rbac: settings for deployer RBAC creation
rbac:
# rbac.create: if false RBAC resources should be in place
create: true
# serviceAccount: settings for Service Accounts used by the deployer
serviceAccount:
# serviceAccount.create: Whether to create the Service Accounts or not
create: true
# annotations to add to the service account
annotations: {}
# annotations to add to the operator deployment
annotations: {}
# labels to add to the operator deployment
labels: {}
# annotations to add to the operator pod
podAnnotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "8080"
# labels to the operator pod
podLabels: {}
podSecurityContext: {}
# runAsNonRoot: true
# runAsUser: 2
# runAsGroup: 2
# fsGroup: 2
# fsGroupChangePolicy: "OnRootMismatch"
securityContext: {}
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# seccompProfile:
# type: RuntimeDefault
# set if you want to use a different operator name
# defaults to `percona-server-mongodb-operator`
# operatorName:
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
env:
resyncPeriod: 5s
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
logStructured: false
logLevel: "INFO"
psmdb-db:
enabled: true
# Default values for psmdb-cluster.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Platform type: kubernetes, openshift
# platform: kubernetes
# Cluster DNS Suffix
# clusterServiceDNSSuffix: svc.cluster.local
# clusterServiceDNSMode: "Internal"
finalizers:
## Set this if you want that operator deletes the primary pod last
- percona.com/delete-psmdb-pods-in-order
## Set this if you want to delete database persistent volumes on cluster deletion
# - percona.com/delete-psmdb-pvc
## Set this if you want to delete all pitr chunks on cluster deletion
# - percona.com/delete-pitr-chunks
nameOverride: ""
fullnameOverride: ""
crVersion: 1.18.0
pause: false
unmanaged: false
unsafeFlags:
tls: false
replsetSize: true
mongosSize: false
terminationGracePeriod: false
backupIfUnhealthy: false
enableVolumeExpansion: false
annotations: {}
# ignoreAnnotations:
# - service.beta.kubernetes.io/aws-load-balancer-backend-protocol
# ignoreLabels:
# - rack
multiCluster:
enabled: false
# DNSSuffix: svc.clusterset.local
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: disabled
schedule: "0 2 * * *"
setFCV: false
image:
repository: percona/percona-server-mongodb
tag: 7.0.14-8-multi
imagePullPolicy: Always
# imagePullSecrets: []
# initImage:
# repository: percona/percona-server-mongodb-operator
# tag: 1.18.0
# initContainerSecurityContext: {}
# tls:
# mode: preferTLS
# # 90 days in hours
# certValidityDuration: 2160h
# allowInvalidCertificates: true
# issuerConf:
# name: special-selfsigned-issuer
# kind: ClusterIssuer
# group: cert-manager.io
secrets: {}
# If you set users secret here the operator will use existing one or generate random values
# If not set the operator generates the default secret with name <cluster_name>-secrets
# users: my-cluster-name-secrets
# encryptionKey: my-cluster-name-mongodb-encryption-key
# keyFile: my-cluster-name-mongodb-keyfile
# vault: my-cluster-name-vault
# ldapSecret: my-ldap-secret
# sse: my-cluster-name-sse
pmm:
enabled: false
image:
repository: percona/pmm-client
tag: 2.43.2
serverHost: monitoring-service
# mongodParams: ""
# mongosParams: ""
# resources: {}
# containerSecurityContext: {}
replsets:
rs0:
name: rs0
size: 3
# terminationGracePeriodSeconds: 300
# externalNodes:
# - host: 34.124.76.90
# - host: 34.124.76.91
# port: 27017
# votes: 0
# priority: 0
# - host: 34.124.76.92
# configuration: |
# operationProfiling:
# mode: slowOp
# systemLog:
# verbosity: 1
# serviceAccountName: percona-server-mongodb-operator
# topologySpreadConstraints:
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: percona-server-mongodb
# maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# replsetOverrides:
# my-cluster-name-rs0-0:
# host: my-cluster-name-rs0-0.example.net:27017
# tags:
# key: value-0
# my-cluster-name-rs0-1:
# host: my-cluster-name-rs0-1.example.net:27017
# tags:
# key: value-1
# my-cluster-name-rs0-2:
# host: my-cluster-name-rs0-2.example.net:27017
# tags:
# key: value-2
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: security
# operator: In
# values:
# - S1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# tolerations: []
# primaryPreferTagSelector:
# region: us-west-2
# zone: us-west-2c
# priorityClass: ""
# annotations: {}
# labels: {}
# podSecurityContext: {}
# containerSecurityContext: {}
# nodeSelector: {}
# livenessProbe:
# failureThreshold: 4
# initialDelaySeconds: 60
# periodSeconds: 30
# timeoutSeconds: 10
# startupDelaySeconds: 7200
# readinessProbe:
# failureThreshold: 8
# initialDelaySeconds: 10
# periodSeconds: 3
# successThreshold: 1
# timeoutSeconds: 2
# runtimeClassName: image-rc
# storage:
# engine: wiredTiger
# wiredTiger:
# engineConfig:
# cacheSizeRatio: 0.5
# directoryForIndexes: false
# journalCompressor: snappy
# collectionConfig:
# blockCompressor: snappy
# indexConfig:
# prefixCompression: true
# inMemory:
# engineConfig:
# inMemorySizeRatio: 0.5
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
# name: rs-sidecar-1
# volumeMounts:
# - mountPath: /volume1
# name: sidecar-volume-claim
# - mountPath: /secret
# name: sidecar-secret
# - mountPath: /configmap
# name: sidecar-config
# sidecarVolumes:
# - name: sidecar-secret
# secret:
# secretName: mysecret
# - name: sidecar-config
# configMap:
# name: myconfigmap
# sidecarPVCs:
# - apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
# name: sidecar-volume-claim
# spec:
# resources:
# requests:
# storage: 1Gi
# volumeMode: Filesystem
# accessModes:
# - ReadWriteOnce
podDisruptionBudget:
maxUnavailable: 1
# splitHorizons:
# my-cluster-name-rs0-0:
# external: rs0-0.mycluster.xyz
# external-2: rs0-0.mycluster2.xyz
# my-cluster-name-rs0-1:
# external: rs0-1.mycluster.xyz
# external-2: rs0-1.mycluster2.xyz
# my-cluster-name-rs0-2:
# external: rs0-2.mycluster.xyz
# external-2: rs0-2.mycluster2.xyz
expose:
enabled: false
type: ClusterIP
# loadBalancerIP: 10.0.0.0
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# labels:
# some-label: some-key
# internalTrafficPolicy: Local
# schedulerName: ""
resources:
limits:
cpu: "300m"
memory: "0.5G"
requests:
cpu: "300m"
memory: "0.5G"
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
pvc:
# annotations:
# volume.beta.kubernetes.io/storage-class: example-hostpath
# labels:
# rack: rack-22
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 3Gi
# hostAliases:
# - ip: "10.10.0.2"
# hostnames:
# - "host1"
# - "host2"
nonvoting:
enabled: false
# podSecurityContext: {}
# containerSecurityContext: {}
size: 3
# configuration: |
# operationProfiling:
# mode: slowOp
# systemLog:
# verbosity: 1
# serviceAccountName: percona-server-mongodb-operator
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: security
# operator: In
# values:
# - S1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# tolerations: []
# priorityClass: ""
# annotations: {}
# labels: {}
# nodeSelector: {}
podDisruptionBudget:
maxUnavailable: 1
resources:
limits:
cpu: "300m"
memory: "0.5G"
requests:
cpu: "300m"
memory: "0.5G"
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
pvc:
# annotations:
# volume.beta.kubernetes.io/storage-class: example-hostpath
# labels:
# rack: rack-22
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 3Gi
arbiter:
enabled: false
size: 1
# serviceAccountName: percona-server-mongodb-operator
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: security
# operator: In
# values:
# - S1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# tolerations: []
# priorityClass: ""
# annotations: {}
# labels: {}
# nodeSelector: {}
sharding:
enabled: true
balancer:
enabled: true
configrs:
size: 3
# terminationGracePeriodSeconds: 300
# externalNodes:
# - host: 34.124.76.90
# - host: 34.124.76.91
# port: 27017
# votes: 0
# priority: 0
# - host: 34.124.76.92
# configuration: |
# operationProfiling:
# mode: slowOp
# systemLog:
# verbosity: 1
# serviceAccountName: percona-server-mongodb-operator
# topologySpreadConstraints:
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: percona-server-mongodb
# maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: security
# operator: In
# values:
# - S1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# tolerations: []
# priorityClass: ""
# annotations: {}
# labels: {}
# podSecurityContext: {}
# containerSecurityContext: {}
# nodeSelector: {}
# livenessProbe: {}
# readinessProbe: {}
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
# name: rs-sidecar-1
# volumeMounts:
# - mountPath: /volume1
# name: sidecar-volume-claim
# sidecarPVCs: []
# sidecarVolumes: []
podDisruptionBudget:
maxUnavailable: 1
expose:
enabled: false
type: ClusterIP
# loadBalancerIP: 10.0.0.0
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# labels:
# some-label: some-key
# internalTrafficPolicy: Local
resources:
limits:
cpu: "300m"
memory: "0.5G"
requests:
cpu: "300m"
memory: "0.5G"
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
pvc:
# annotations:
# volume.beta.kubernetes.io/storage-class: example-hostpath
# labels:
# rack: rack-22
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 3Gi
# hostAliases:
# - ip: "10.10.0.2"
# hostnames:
# - "host1"
# - "host2"
mongos:
size: 3
# terminationGracePeriodSeconds: 300
# configuration: |
# systemLog:
# verbosity: 1
# serviceAccountName: percona-server-mongodb-operator
# topologySpreadConstraints:
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: percona-server-mongodb
# maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: security
# operator: In
# values:
# - S1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# tolerations: []
# priorityClass: ""
# annotations: {}
# labels: {}
# podSecurityContext: {}
# containerSecurityContext: {}
# nodeSelector: {}
# livenessProbe: {}
# readinessProbe: {}
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
# name: rs-sidecar-1
# volumeMounts:
# - mountPath: /volume1
# name: sidecar-volume-claim
# sidecarPVCs: []
# sidecarVolumes: []
podDisruptionBudget:
maxUnavailable: 1
resources:
limits:
cpu: "300m"
memory: "0.5G"
requests:
cpu: "300m"
memory: "0.5G"
expose:
enabled: false
type: ClusterIP
# loadBalancerIP: 10.0.0.0
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# labels:
# some-label: some-key
# internalTrafficPolicy: Local
# nodePort: 32017
# auditLog:
# destination: file
# format: BSON
# filter: '{}'
# hostAliases:
# - ip: "10.10.0.2"
# hostnames:
# - "host1"
# - "host2"
# users:
# - name: my-user
# db: admin
# passwordSecretRef:
# name: my-user-password
# key: my-user-password-key
# roles:
# - name: clusterAdmin
# db: admin
# - name: userAdminAnyDatabase
# db: admin
# - name: my-usr
# db: admin
# passwordSecretRef:
# name: my-user-pwd
# key: my-user-pwd-key
# roles:
# - name: dbOwner
# db: sometest
# roles:
# - role: myClusterwideAdmin
# db: admin
# privileges:
# - resource:
# cluster: true
# actions:
# - addShard
# - resource:
# db: config
# collection: ''
# actions:
# - find
# - update
# - insert
# - remove
# roles:
# - role: read
# db: admin
# - role: my-role
# db: myDb
# privileges:
# - resource:
# db: ''
# collection: ''
# actions:
# - find
# authenticationRestrictions:
# - clientSource:
# - 127.0.0.1
# serverAddress:
# - 127.0.0.1
backup:
enabled: false
image:
repository: percona/percona-backup-mongodb
tag: 2.7.0-multi
# annotations:
# iam.amazonaws.com/role: role-arn
# podSecurityContext: {}
# containerSecurityContext: {}
# resources:
# limits:
# cpu: "300m"
# memory: "1.2G"
# requests:
# cpu: "300m"
# memory: "1G"
storages:
# s3-us-west:
# type: s3
# s3:
# bucket: S3-BACKUP-BUCKET-NAME-HERE
# credentialsSecret: my-cluster-name-backup-s3
# serverSideEncryption:
# kmsKeyID: 1234abcd-12ab-34cd-56ef-1234567890ab
# sseAlgorithm: aws:kms
# sseCustomerAlgorithm: AES256
# sseCustomerKey: Y3VzdG9tZXIta2V5
# retryer:
# numMaxRetries: 3
# minRetryDelay: 30ms
# maxRetryDelay: 5m
# region: us-west-2
# prefix: ""
# uploadPartSize: 10485760
# maxUploadParts: 10000
# storageClass: STANDARD
# insecureSkipTLSVerify: false
# minio:
# type: s3
# s3:
# bucket: MINIO-BACKUP-BUCKET-NAME-HERE
# region: us-east-1
# credentialsSecret: my-cluster-name-backup-minio
# endpointUrl: http://minio.psmdb.svc.cluster.local:9000/minio/
# prefix: ""
# azure-blob:
# type: azure
# azure:
# container: percona-container
# prefix: backups
# endpointUrl: https://perconasa.blob.core.windows.net
# credentialsSecret: perconasasecret
pitr:
enabled: false
oplogOnly: false
# oplogSpanMin: 10
# compressionType: gzip
# compressionLevel: 6
# configuration:
# backupOptions:
# priority:
# "localhost:28019": 2.5
# "localhost:27018": 2.5
# timeouts:
# startingStatus: 33
# oplogSpanMin: 10
# restoreOptions:
# batchSize: 500
# numInsertionWorkers: 10
# numDownloadWorkers: 4
# maxDownloadBufferMb: 0
# downloadChunkMb: 32
# mongodLocation: /usr/bin/mongo
# mongodLocationMap:
# "node01:2017": /usr/bin/mongo
# "node03:27017": /usr/bin/mongo
tasks:
# - name: daily-s3-us-west
# enabled: true
# schedule: "0 0 * * *"
# keep: 3
# storageName: s3-us-west
# compressionType: gzip
# - name: weekly-s3-us-west
# enabled: false
# schedule: "0 0 * * 0"
# keep: 5
# storageName: s3-us-west
# compressionType: gzip
# - name: weekly-s3-us-west-physical
# enabled: false
# schedule: "0 5 * * 0"
# keep: 5
# type: physical
# storageName: s3-us-west
# compressionType: gzip
# compressionLevel: 6
# If you set systemUsers here, the secret will be constructed by Helm with these values
# systemUsers:
# MONGODB_BACKUP_USER: backup
# MONGODB_BACKUP_PASSWORD: backup123456
# MONGODB_DATABASE_ADMIN_USER: databaseAdmin
# MONGODB_DATABASE_ADMIN_PASSWORD: databaseAdmin123456
# MONGODB_CLUSTER_ADMIN_USER: clusterAdmin
# MONGODB_CLUSTER_ADMIN_PASSWORD: clusterAdmin123456
# MONGODB_CLUSTER_MONITOR_USER: clusterMonitor
# MONGODB_CLUSTER_MONITOR_PASSWORD: clusterMonitor123456
# MONGODB_USER_ADMIN_USER: userAdmin
# MONGODB_USER_ADMIN_PASSWORD: userAdmin123456
# PMM_SERVER_API_KEY: apikey
# # PMM_SERVER_USER: admin
# # PMM_SERVER_PASSWORD: admin
backup:
enabled: true
annotations:
description: "test"
name: backup
labels:
app: mongo-backup
environment: testing
clusterName: mdb-db-psmdb-db
storageName: azure-blob
type: logical
restore:
enabled: true
annotations:
description: "test"
name: restore1
labels:
app: mongo-restore
environment: testing
clusterName: mdb-db-psmdb-db
backupName: backup

View File

@ -0,0 +1,21 @@
apiVersion: v2
name: redis-cluster
description: Provides easy redis setup definitions for Kubernetes services and deployments.
version: 0.15.8
appVersion: "0.15.1"
home: https://github.com/ot-container-kit/redis-operator
sources:
- https://github.com/ot-container-kit/redis-operator
maintainers:
- name: iamabhishek-dubey
- name: sandy724
- name: shubham-cmyk
keywords:
- operator
- redis
- opstree
- kubernetes
- openshift
- redis-exporter
icon: https://github.com/OT-CONTAINER-KIT/redis-operator/raw/master/static/redis-operator-logo.svg
type: application

View File

@ -0,0 +1,65 @@
# Redis Cluster
Redis is a key-value based distributed database. This Helm chart sets up a Redis cluster and requires the [Redis Operator](../redis-operator) to be running inside the Kubernetes cluster. The Redis cluster definition can be modified or changed via [values.yaml](./values.yaml).
```shell
helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/
helm install <my-release> ot-helm/redis-cluster \
--set redisCluster.clusterSize=3 --namespace <namespace>
```
The Redis setup can be upgraded with the `helm upgrade` command:
```shell
helm upgrade <my-release> ot-helm/redis-cluster --install \
--set redisCluster.clusterSize=5 --namespace <namespace>
```
To uninstall the chart:
```shell
helm delete <my-release> --namespace <namespace>
```
## Prerequisites
- Kubernetes 1.15+
- Helm 3.x
- Redis Operator 0.15.0+
## Parameters
| **Name** | **Default Value** | **Description** |
|------------------------------------|--------------------------------|-----------------------------------------------------------------------------------------------|
| `redisCluster.imagePullSecrets` | {} | List of image pull secrets, in case the redis image is pulled from a private registry |
| `redisCluster.clusterSize` | 3 | Size of the redis cluster leader and follower nodes |
| `redisCluster.clusterVersion` | v7 | Major version of the Redis setup; values can be v6 or v7 |
| `redisCluster.persistenceEnabled` | true | Whether persistence should be enabled in the Redis cluster setup |
| `redisCluster.redisSecret.secretName` | "" | Name of an existing secret in Kubernetes that holds the redis password |
| `redisCluster.redisSecret.secretKey` | "" | Key of the existing secret in Kubernetes |
| `redisCluster.image` | quay.io/opstree/redis | Name of the redis image |
| `redisCluster.tag` | v7.0.12 | Tag of the redis image |
| `redisCluster.imagePullPolicy` | IfNotPresent | Image pull policy of the redis image |
| `redisCluster.leader.replicas` | 3 | Number of Redis leader replicas |
| `redisCluster.leader.serviceType` | ClusterIP | Kubernetes service type for the Redis leader |
| `redisCluster.follower.replicas` | 3 | Number of Redis follower replicas |
| `redisCluster.follower.serviceType` | ClusterIP | Kubernetes service type for the Redis follower |
| `externalService.enabled` | false | Whether the redis service should be exposed using a LoadBalancer or NodePort |
| `externalService.annotations` | {} | Kubernetes service-related annotations |
| `externalService.serviceType` | LoadBalancer | Kubernetes service type for exposing the service; values - ClusterIP, NodePort, and LoadBalancer |
| `externalService.port` | 6379 | Port number on which the redis external service should be exposed |
| `serviceMonitor.enabled` | false | Whether to create a ServiceMonitor to monitor redis with Prometheus |
| `serviceMonitor.interval` | 30s | Interval at which metrics should be scraped |
| `serviceMonitor.scrapeTimeout` | 10s | Timeout after which the scrape is ended |
| `serviceMonitor.namespace` | monitoring | Namespace in which the Prometheus operator is running |
| `redisExporter.enabled` | false | Whether the redis exporter should be deployed |
| `redisExporter.image` | quay.io/opstree/redis-exporter | Name of the redis exporter image |
| `redisExporter.tag` | v1.44.0 | Tag of the redis exporter image |
| `redisExporter.imagePullPolicy` | IfNotPresent | Image pull policy of the redis exporter image |
| `sidecars` | {} | Sidecar (name, image, resources, env) for redis pods |
| `redisCluster.leader.nodeSelector` | null | NodeSelector for the leader statefulset (same key under `redisCluster.follower`) |
| `priorityClassName` | "" | Priority class name for the redis statefulset |
| `storageSpec` | (see values.yaml) | Storage configuration for the redis setup |
| `podSecurityContext` | runAsUser/fsGroup 1000 | Security context for redis pods for changing system or kernel level parameters |
| `redisCluster.leader.affinity` | {} | Affinity for node and pods for the leader statefulset (same key under `redisCluster.follower`) |
| `redisCluster.leader.tolerations` | [] | Tolerations for the leader statefulset (same key under `redisCluster.follower`) |
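For convenience, the sketch below shows how a handful of these parameters combine in a single override file; every value is illustrative, and `my-values.yaml` is just a placeholder name:
```yaml
# my-values.yaml -- illustrative overrides only
redisCluster:
  clusterSize: 3
  clusterVersion: v7
  persistenceEnabled: true
  redisSecret:
    secretName: redis-secret   # assumes this secret already exists in the namespace
    secretKey: password
redisExporter:
  enabled: true
serviceMonitor:
  enabled: true
  namespace: monitoring
```
It can then be applied with `helm install <my-release> ot-helm/redis-cluster -f my-values.yaml --namespace <namespace>`.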

View File

@ -0,0 +1,90 @@
{{/* vim: set filetype=mustache: */}}
{{/* Define common labels */}}
{{- define "common.labels" -}}
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
{{- if .Values.labels }}
{{- range $labelkey, $labelvalue := .Values.labels }}
{{ $labelkey}}: {{ $labelvalue }}
{{- end }}
{{- end }}
{{- end -}}
{{/* Helper for Redis Cluster (leader & follower) */}}
{{- define "redis.role" -}}
{{- if .affinity }}
affinity:
{{- toYaml .affinity | nindent 2 }}
{{- end }}
{{- if .tolerations }}
tolerations:
{{- toYaml .tolerations | nindent 2 }}
{{- end }}
{{- if .pdb.enabled }}
pdb:
enabled: {{ .pdb.enabled }}
maxUnavailable: {{ .pdb.maxUnavailable }}
minAvailable: {{ .pdb.minAvailable }}
{{- end }}
{{- if .nodeSelector }}
nodeSelector:
{{- toYaml .nodeSelector | nindent 2 }}
{{- end }}
{{- if .securityContext }}
securityContext:
{{- toYaml .securityContext | nindent 2 }}
{{- end }}
{{- end -}}
{{/* Generate sidecar properties */}}
{{- define "sidecar.properties" -}}
{{- with .Values.sidecars }}
name: {{ .name }}
image: {{ .image }}
{{- if .imagePullPolicy }}
imagePullPolicy: {{ .imagePullPolicy }}
{{- end }}
{{- if .resources }}
resources:
{{ toYaml .resources | nindent 2 }}
{{- end }}
{{- if .env }}
env:
{{ toYaml .env | nindent 2 }}
{{- end }}
{{- end }}
{{- end -}}
{{/* Generate init container properties */}}
{{- define "initContainer.properties" -}}
{{- with .Values.initContainer }}
{{- if .enabled }}
image: {{ .image }}
{{- if .imagePullPolicy }}
imagePullPolicy: {{ .imagePullPolicy }}
{{- end }}
{{- if .resources }}
resources:
{{ toYaml .resources | nindent 2 }}
{{- end }}
{{- if .env }}
env:
{{ toYaml .env | nindent 2 }}
{{- end }}
{{- if .command }}
command:
{{ toYaml .command | nindent 2 }}
{{- end }}
{{- if .args }}
args:
{{ toYaml .args | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,17 @@
{{- if eq .Values.externalConfig.enabled true }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-ext-config
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
data:
redis-additional.conf: |
{{ .Values.externalConfig.data | nindent 4 }}
{{- end }}
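The values consumed by this template live under `externalConfig` in the chart's values.yaml; a minimal sketch of enabling it, with example redis directives, looks like:
```yaml
externalConfig:
  enabled: true
  data: |
    tcp-keepalive 400
    slowlog-max-len 158
```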

View File

@ -0,0 +1,29 @@
{{- if and (gt (int .Values.redisCluster.follower.replicas) 0) (eq .Values.externalService.enabled true) }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-follower-external-service
{{- if .Values.externalService.annotations }}
annotations:
{{ toYaml .Values.externalService.annotations | indent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
type: {{ .Values.externalService.serviceType }}
selector:
app: {{ .Release.Name }}-follower
redis_setup_type: cluster
role: follower
ports:
- protocol: TCP
port: {{ .Values.externalService.port }}
targetPort: 6379
name: client
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if and (eq .Values.serviceMonitor.enabled true) (gt (int .Values.redisCluster.follower.replicas) 0) }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ .Release.Name }}-follower-prometheus-monitoring
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
selector:
matchLabels:
app: {{ .Release.Name }}-follower
redis_setup_type: cluster
role: follower
endpoints:
- port: redis-exporter
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
namespaceSelector:
matchNames:
- {{ .Values.serviceMonitor.namespace }}
{{- end }}

View File

@ -0,0 +1,29 @@
{{- if and (gt (int .Values.redisCluster.leader.replicas) 0) (eq .Values.externalService.enabled true) }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-leader-external-service
{{- if .Values.externalService.annotations }}
annotations:
{{ toYaml .Values.externalService.annotations | indent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
type: {{ .Values.externalService.serviceType }}
selector:
app: {{ .Release.Name }}-leader
redis_setup_type: cluster
role: leader
ports:
- protocol: TCP
port: {{ .Values.externalService.port }}
targetPort: 6379
name: client
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if and (eq .Values.serviceMonitor.enabled true) (gt (int .Values.redisCluster.leader.replicas) 0) }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ .Release.Name }}-leader-prometheus-monitoring
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
selector:
matchLabels:
app: {{ .Release.Name }}-leader
redis_setup_type: cluster
role: leader
endpoints:
- port: redis-exporter
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
namespaceSelector:
matchNames:
- {{ .Values.serviceMonitor.namespace }}
{{- end }}

View File

@ -0,0 +1,82 @@
---
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
name: {{ .Release.Name }}
labels: {{- include "common.labels" . | nindent 4 }}
spec:
clusterSize: {{ .Values.redisCluster.clusterSize }}
persistenceEnabled: {{ .Values.redisCluster.persistenceEnabled }}
clusterVersion: {{ .Values.redisCluster.clusterVersion }}
redisLeader: {{- include "redis.role" .Values.redisCluster.leader | nindent 4 }}
replicas: {{ .Values.redisCluster.leader.replicas }}
{{- if .Values.externalConfig.enabled }}
redisConfig:
additionalRedisConfig: "{{ .Release.Name }}-ext-config"
{{- end }}
redisFollower: {{- include "redis.role" .Values.redisCluster.follower | nindent 4 }}
replicas: {{ .Values.redisCluster.follower.replicas }}
{{- if .Values.externalConfig.enabled }}
redisConfig:
additionalRedisConfig: "{{ .Release.Name }}-ext-config"
{{- end }}
redisExporter:
enabled: {{ .Values.redisExporter.enabled }}
image: "{{ .Values.redisExporter.image }}:{{ .Values.redisExporter.tag }}"
imagePullPolicy: "{{ .Values.redisExporter.imagePullPolicy }}"
{{- if .Values.redisExporter.resources}}
resources: {{ toYaml .Values.redisExporter.resources | nindent 6 }}
{{- end }}
kubernetesConfig:
image: "{{ .Values.redisCluster.image }}:{{ .Values.redisCluster.tag }}"
imagePullPolicy: "{{ .Values.redisCluster.imagePullPolicy }}"
{{- if .Values.redisCluster.imagePullSecrets}}
imagePullSecrets: {{ toYaml .Values.redisCluster.imagePullSecrets | nindent 4 }}
{{- end }}
{{- if .Values.redisCluster.resources }}
resources: {{ toYaml .Values.redisCluster.resources | nindent 6 }}
{{- end }}
{{- if and .Values.redisCluster.redisSecret.secretName .Values.redisCluster.redisSecret.secretKey }}
redisSecret:
name: "{{ .Values.redisCluster.redisSecret.secretName | quote }}"
key: "{{ .Values.redisCluster.redisSecret.secretKey | quote }}"
{{- end }}
{{- if .Values.storageSpec }}
storage: {{ toYaml .Values.storageSpec | nindent 4 }}
{{- end }}
{{- if and .Values.priorityClassName (ne .Values.priorityClassName "") }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.podSecurityContext }}
podSecurityContext: {{ toYaml .Values.podSecurityContext | nindent 4 }}
{{- end }}
{{- if and .Values.TLS.ca .Values.TLS.cert .Values.TLS.key .Values.TLS.secret.secretName }}
TLS:
ca: {{ .Values.TLS.ca | quote }}
cert: {{ .Values.TLS.cert | quote }}
key: {{ .Values.TLS.key | quote }}
secret:
secretName: {{ .Values.TLS.secret.secretName | quote }}
{{- end }}
{{- if and .Values.acl.secret (ne .Values.acl.secret.secretName "") }}
acl:
secret:
secretName: {{ .Values.acl.secret.secretName | quote }}
{{- end }}
{{- if and .Values.sidecars (ne .Values.sidecars.name "") (ne .Values.sidecars.image "") }}
sidecars: {{ include "sidecar.properties" . | nindent 4 }}
{{- end }}
{{- if and .Values.initContainer .Values.initContainer.enabled (ne .Values.initContainer.image "") }}
initContainers: {{ include "initContainer.properties" . | nindent 4 }}
{{- end }}
{{- if .Values.env }}
env: {{ toYaml .Values.env | nindent 4 }}
{{- end }}
{{- if and .Values.serviceAccountName (ne .Values.serviceAccountName "") }}
serviceAccountName: "{{ .Values.serviceAccountName }}"
{{- end }}

View File

@ -0,0 +1,180 @@
---
redisCluster:
clusterSize: 3
clusterVersion: v7
persistenceEnabled: true
image: quay.io/opstree/redis
tag: v7.0.12
imagePullPolicy: IfNotPresent
imagePullSecrets: []
# - name: Secret with Registry credentials
redisSecret:
secretName: ""
secretKey: ""
resources: {}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# cpu: 100m
# memory: 128Mi
leader:
replicas: 3
serviceType: ClusterIP
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: disktype
# operator: In
# values:
# - ssd
tolerations: []
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: null
# memory: medium
securityContext: {}
pdb:
enabled: false
maxUnavailable: 1
minAvailable: 1
follower:
replicas: 3
serviceType: ClusterIP
affinity: null
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: disktype
# operator: In
# values:
# - ssd
tolerations: []
# - key: "key"
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
nodeSelector: null
# memory: medium
securityContext: {}
pdb:
enabled: false
maxUnavailable: 1
minAvailable: 1
labels: {}
# foo: bar
# test: echo
externalConfig:
enabled: false
data: |
tcp-keepalive 400
slowlog-max-len 158
stream-node-max-bytes 2048
externalService:
enabled: false
# annotations:
# foo: bar
serviceType: LoadBalancer
port: 6379
serviceMonitor:
enabled: false
interval: 30s
scrapeTimeout: 10s
namespace: monitoring
redisExporter:
enabled: false
image: quay.io/opstree/redis-exporter
tag: "v1.44.0"
imagePullPolicy: IfNotPresent
resources: {}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# cpu: 100m
# memory: 128Mi
sidecars:
name: ""
image: ""
imagePullPolicy: "IfNotPresent"
resources:
limits:
cpu: "100m"
memory: "128Mi"
requests:
cpu: "50m"
memory: "64Mi"
env: []
# - name: MY_ENV_VAR
# value: "my-env-var-value"
initContainer:
enabled: false
image: ""
imagePullPolicy: "IfNotPresent"
resources: {}
# requests:
# memory: "64Mi"
# cpu: "250m"
# limits:
# memory: "128Mi"
# cpu: "500m"
env: []
command: []
args: []
priorityClassName: ""
storageSpec:
volumeClaimTemplate:
spec:
# storageClassName: standard
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
nodeConfVolume: true
nodeConfVolumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
# selector: {}
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
# serviceAccountName: redis-sa
TLS:
ca: ca.key
cert: tls.crt
key: tls.key
secret:
secretName: ""
acl:
secret:
secretName: ""
env: []
# - name: VAR_NAME
#   value: "value1"
serviceAccountName: ""

charts/redis-operator/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
*.tgz

View File

@ -0,0 +1,6 @@
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.12.4
digest: sha256:026d03c56e2f8369b0f7d79f9560d5a33b2c5ae8a7d751213e56e2a0176cb874
generated: "2023-10-02T14:14:45.164829041+05:30"

View File

@ -0,0 +1,28 @@
---
apiVersion: v2
version: 0.15.7
appVersion: "0.15.1"
description: Provides easy redis setup definitions for Kubernetes services and deployments.
engine: gotpl
maintainers:
- name: iamabhishek-dubey
- name: sandy724
- name: shubham-cmyk
name: redis-operator
sources:
- https://github.com/OT-CONTAINER-KIT/redis-operator
home: https://github.com/OT-CONTAINER-KIT/redis-operator
icon: https://github.com/OT-CONTAINER-KIT/redis-operator/raw/master/static/redis-operator-logo.svg
keywords:
- operator
- redis
- opstree
- kubernetes
- openshift
dependencies:
- name: cert-manager
version: v1.12.4
repository: https://charts.jetstack.io
alias: certmanager
condition: certmanager.enabled

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,112 @@
# Redis Operator Helm Chart
## Introduction
This Helm chart deploys the redis-operator into your Kubernetes cluster. The operator facilitates the deployment, scaling, and management of Redis clusters and other Redis resources provided by the OpsTree Solutions team.
## Prerequisites
- Helm v3+
- Kubernetes v1.16+
- If you intend to use cert-manager, ensure that the cert-manager CRDs are installed before deploying the redis-operator.
## Installation Steps
### 1. Add Helm Repository
```bash
helm repo add ot-helm https://ot-container-kit.github.io/helm-charts
```
### 2. Install Cert-Manager CRDs (if using cert-manager)
If you plan to use cert-manager with the redis-operator, you need to install the cert-manager CRDs before deploying the operator.
```bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.crds.yaml
```
### 3. Install Redis Operator
Replace `<YourCertSecretName>` with the name of your webhook certificate secret.
```bash
helm install <redis-operator> ot-helm/redis-operator --version=0.15.7 \
  --set certificate.secretName=<YourCertSecretName> \
  --set certmanager.enabled=true --set redisOperator.webhook=true \
  --namespace <redis-operator> --create-namespace
```
> Note: If `certificate.secretName` is not provided, the operator will generate a self-signed certificate and use it for the webhook server.
---
> Note: If you want to disable the webhook, pass `--set redisOperator.webhook=false` and `--set certmanager.enabled=false` while installing the redis-operator.
### 4. Patch the CA Bundle (if using cert-manager)
Cert-manager injects the CA bundle into the webhook configuration.
```bash
kubectl patch crd redis.redis.redis.opstreelabs.in -p '{"metadata":{"annotations":{"cert-manager.io/inject-ca-from":"<redis-operator>/<serving-cert>"}}}'
kubectl patch crd redisclusters.redis.redis.opstreelabs.in -p '{"metadata":{"annotations":{"cert-manager.io/inject-ca-from":"<redis-operator>/<serving-cert>"}}}'
kubectl patch crd redisreplications.redis.redis.opstreelabs.in -p '{"metadata":{"annotations":{"cert-manager.io/inject-ca-from":"<redis-operator>/<serving-cert>"}}}'
kubectl patch crd redissentinels.redis.redis.opstreelabs.in -p '{"metadata":{"annotations":{"cert-manager.io/inject-ca-from":"<redis-operator>/<serving-cert>"}}}'
```
> Note: Replace `<redis-operator>` and `<serving-cert>` with your specific values i.e. release name and certificate name.
#### You can verify the patch by running the following commands
```bash
kubectl get crd redis.redis.redis.opstreelabs.in -o=jsonpath='{.metadata.annotations}'
kubectl get crd redisclusters.redis.redis.opstreelabs.in -o=jsonpath='{.metadata.annotations}'
kubectl get crd redisreplications.redis.redis.opstreelabs.in -o=jsonpath='{.metadata.annotations}'
kubectl get crd redissentinels.redis.redis.opstreelabs.in -o=jsonpath='{.metadata.annotations}'
```
### How to generate a private key (optional)
```bash
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt
kubectl create secret tls <webhook-server-cert> --key tls.key --cert tls.crt -n <redis-operator>
```
> Note: This secret will be used as the webhook server certificate, so generate it before installing the redis-operator.
## Default Values
| Parameter | Description | Default |
|-------------------------------------|------------------------------------|--------------------------------------------------------------|
| `redisOperator.name` | Operator name | `redis-operator` |
| `redisOperator.imageName` | Image repository | `quay.io/opstree/redis-operator` |
| `redisOperator.imageTag` | Image tag | `{{appVersion}}` |
| `redisOperator.imagePullPolicy` | Image pull policy | `Always` |
| `redisOperator.podAnnotations` | Additional pod annotations | `{}` |
| `redisOperator.podLabels` | Additional Pod labels | `{}` |
| `redisOperator.extraArgs` | Additional arguments for the operator | `[]` |
| `redisOperator.watch_namespace` | Namespace for the operator to watch | `""` |
| `redisOperator.env` | Environment variables for the operator | `[]` |
| `redisOperator.webhook` | Enable webhook | `false` |
| `resources.limits.cpu` | CPU limit | `500m` |
| `resources.limits.memory` | Memory limit | `500Mi` |
| `resources.requests.cpu` | CPU request | `500m` |
| `resources.requests.memory` | Memory request | `500Mi` |
| `replicas` | Number of replicas | `1` |
| `serviceAccountName` | Service account name | `redis-operator` |
| `certificate.name` | Certificate name | `serving-cert` |
| `certificate.secretName` | Certificate secret name | `webhook-server-cert` |
| `issuer.type` | Issuer type | `selfSigned` |
| `issuer.name` | Issuer name | `redis-operator-issuer` |
| `issuer.email` | Issuer email | `shubham.gupta@opstree.com` |
| `issuer.server` | Issuer server URL | `https://acme-v02.api.letsencrypt.org/directory` |
| `issuer.privateKeySecretName` | Private key secret name | `letsencrypt-prod` |
| `certmanager.enabled` | Enable cert-manager | `false` |
## Scheduling Parameters
| Parameter | Description | Default |
|-------------------------|--------------------------------------------|----------|
| `priorityClassName` | Priority class name for the pods | `""` |
| `nodeSelector` | Labels for pod assignment | `{}` |
| `tolerateAllTaints` | Whether to tolerate all node taints | `false` |
| `tolerations` | Taints to tolerate | `[]` |
| `affinity` | Affinity rules for pod assignment | `{}` |
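As a worked example, the override file below enables the webhook together with a self-signed cert-manager issuer. It is a minimal sketch built from the defaults above (the file name and values are illustrative), not a verified production setup:
```yaml
# operator-values.yaml -- illustrative
redisOperator:
  webhook: true
certmanager:
  enabled: true
certificate:
  name: serving-cert
  secretName: webhook-server-cert
issuer:
  type: selfSigned
  name: redis-operator-issuer
```
Install with `helm install redis-operator ot-helm/redis-operator -f operator-values.yaml --namespace redis-operator --create-namespace`, after the cert-manager CRDs are in place.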

View File

@ -0,0 +1,34 @@
{{/* vim: set filetype=mustache: */}}
{{/* Define issuer spec based on the type */}}
{{- define "redis-operator.issuerSpec" -}}
{{- if eq .Values.issuer.type "acme" }}
acme:
email: {{ .Values.issuer.email }}
server: {{ .Values.issuer.server }}
privateKeySecretRef:
name: {{ .Values.issuer.privateKeySecretName }}
solvers:
- http01:
ingress:
class: {{ .Values.issuer.solver.ingressClass }}
{{- else }}
selfSigned: {}
{{- end }}
{{- end -}}
{{/* Common labels */}}
{{- define "redisOperator.labels" -}}
app.kubernetes.io/name: {{ .Values.redisOperator.name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: operator
app.kubernetes.io/part-of: {{ .Release.Name }}
{{- end }}
{{/* Selector labels */}}
{{- define "redisOperator.selectorLabels" -}}
name: {{ .Values.redisOperator.name }}
{{- end }}

View File

@ -0,0 +1,43 @@
{{ if .Values.certmanager.enabled }}
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ .Values.issuer.name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.redisOperator.name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: issuer
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
{{- include "redis-operator.issuerSpec" . | nindent 2 }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ .Values.certificate.name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Values.redisOperator.name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: certificate
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
dnsNames:
- {{ .Values.service.name }}.{{ .Values.service.namespace }}.svc
- {{ .Values.service.name }}.{{ .Values.service.namespace }}.svc.cluster.local
issuerRef:
kind: Issuer
name: {{ .Values.issuer.name }}
secretName: {{ .Values.certificate.secretName }}
{{ end }}

View File

@ -0,0 +1,76 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.redisOperator.name }}
namespace: {{ .Release.Namespace }}
labels: {{- include "redisOperator.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels: {{- include "redisOperator.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ .Values.certificate.name }}
{{- with .Values.redisOperator.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels: {{- include "redisOperator.selectorLabels" . | nindent 8 }}
{{- with .Values.redisOperator.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
containers:
- name: "{{ .Values.redisOperator.name }}"
image: "{{ .Values.redisOperator.imageName }}:{{ .Values.redisOperator.imageTag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.redisOperator.imagePullPolicy }}
command:
- /manager
args:
- --leader-elect
{{- range $arg := .Values.redisOperator.extraArgs }}
- {{ $arg }}
{{- end }}
{{- if .Values.redisOperator.webhook }}
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
{{- end }}
env:
- name: ENABLE_WEBHOOKS
value: "{{ .Values.redisOperator.webhook | toString }}"
{{- if .Values.redisOperator.watch_namespace }}
- name: WATCH_NAMESPACE
value: {{ .Values.redisOperator.watch_namespace }}
{{- end }}
{{- range $env := .Values.redisOperator.env }}
- name: {{ $env.name }}
value: {{ $env.value | quote }}
{{- end }}
{{- if .Values.resources }}
resources: {{ toYaml .Values.resources | nindent 10 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- /* render the scheduling tolerations documented in values.yaml; tolerateAllTaints tolerates every taint */}}
{{- if .Values.tolerateAllTaints }}
tolerations:
- operator: Exists
{{- else if .Values.tolerations }}
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
{{- end }}
serviceAccountName: "{{ .Values.serviceAccountName }}"
serviceAccount: "{{ .Values.serviceAccountName }}"
{{- if .Values.redisOperator.webhook }}
volumes:
- name: cert
secret:
defaultMode: 420
secretName: {{ .Values.certificate.secretName }}
{{- end }}

View File

@ -0,0 +1,21 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.redisOperator.name }}
labels:
app.kubernetes.io/name : {{ .Values.redisOperator.name }}
helm.sh/chart : {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by : {{ .Release.Service }}
app.kubernetes.io/instance : {{ .Release.Name }}
app.kubernetes.io/version : {{ .Chart.AppVersion }}
app.kubernetes.io/component: role-binding
app.kubernetes.io/part-of : {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccountName }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.redisOperator.name }}
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,126 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.redisOperator.name }}
labels:
app.kubernetes.io/name : {{ .Values.redisOperator.name }}
helm.sh/chart : {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by : {{ .Release.Service }}
app.kubernetes.io/instance : {{ .Release.Name }}
app.kubernetes.io/version : {{ .Chart.AppVersion }}
app.kubernetes.io/component: role
app.kubernetes.io/part-of : {{ .Release.Name }}
rules:
- apiGroups:
- redis.redis.opstreelabs.in
resources:
- rediss
- redisclusters
- redisreplications
- redis
- rediscluster
- redissentinel
- redissentinels
- redisreplication
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- nonResourceURLs:
- '*'
verbs:
- get
- apiGroups:
- "apiextensions.k8s.io"
resources:
- "customresourcedefinitions"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- redis.redis.opstreelabs.in
resources:
- redis/finalizers
- rediscluster/finalizers
- redisclusters/finalizers
- redissentinel/finalizers
- redissentinels/finalizers
- redisreplication/finalizers
- redisreplications/finalizers
verbs:
- update
- apiGroups:
- redis.redis.opstreelabs.in
resources:
- redis/status
- rediscluster/status
- redisclusters/status
- redissentinel/status
- redissentinels/status
- redisreplication/status
- redisreplications/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- secrets
- pods/exec
- pods
- services
- configmaps
- events
- persistentvolumeclaims
- namespaces
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- "policy"
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch

View File

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.redisOperator.name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name : {{ .Values.redisOperator.name }}
helm.sh/chart : {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by : {{ .Release.Service }}
app.kubernetes.io/instance : {{ .Release.Name }}
app.kubernetes.io/version : {{ .Chart.AppVersion }}
app.kubernetes.io/component: service-account
app.kubernetes.io/part-of : {{ .Release.Name }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name : {{ .Values.redisOperator.name }}
helm.sh/chart : {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by : {{ .Release.Service }}
app.kubernetes.io/instance : {{ .Release.Name }}
app.kubernetes.io/version : {{ .Chart.AppVersion }}
app.kubernetes.io/component: webhook
app.kubernetes.io/part-of : {{ .Release.Name }}
name: {{ .Values.service.name }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 443
protocol: TCP
targetPort: 9443
selector:
name: {{ .Values.redisOperator.name }}

View File

@ -0,0 +1,60 @@
---
redisOperator:
name: redis-operator
imageName: quay.io/opstree/redis-operator
# Overrides the image tag whose default is the chart appVersion.
imageTag: ""
imagePullPolicy: Always
# Additional pod annotations
podAnnotations: {}
# Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {}
# Additional arguments for redis-operator container
extraArgs: []
# - -zap-log-level=error
watch_namespace: ""
env: []
webhook: false
resources:
limits:
cpu: 500m
memory: 500Mi
requests:
cpu: 500m
memory: 500Mi
replicas: 1
serviceAccountName: redis-operator
service:
name: webhook-service
namespace: redis-operator
certificate:
name: serving-cert
secretName: webhook-server-cert
issuer:
type: selfSigned
name: redis-operator-issuer
email: shubham.gupta@opstree.com
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretName: letsencrypt-prod
solver:
enabled: true
ingressClass: nginx
certmanager:
enabled: false
priorityClassName: ""
nodeSelector: {}
tolerateAllTaints: false
tolerations: []
affinity: {}

View File

@ -0,0 +1,22 @@
apiVersion: v2
name: redis-replication
description: Provides easy redis setup definitions for Kubernetes services and deployments.
type: application
engine: gotpl
maintainers:
- name: iamabhishek-dubey
- name: sandy724
- name: shubham-cmyk
sources:
- https://github.com/ot-container-kit/redis-operator
version: 0.15.7
appVersion: "0.15.0"
home: https://github.com/ot-container-kit/redis-operator
keywords:
- operator
- redis
- opstree
- kubernetes
- openshift
- redis-exporter
icon: https://github.com/OT-CONTAINER-KIT/redis-operator/raw/master/static/redis-operator-logo.svg

View File

@ -0,0 +1,17 @@
{{- if eq .Values.externalConfig.enabled true }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-ext-config
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
data:
redis-additional.conf: |
{{ .Values.externalConfig.data | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,83 @@
---
apiVersion: redis.redis.opstreelabs.in/v1beta1
kind: RedisReplication
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
{{- if .Values.labels }}
{{- range $labelkey, $labelvalue := .Values.labels }}
{{ $labelkey}}: {{ $labelvalue }}
{{- end }}
{{- end }}
spec:
clusterSize: {{ .Values.redisReplication.clusterSize }}
{{- if eq .Values.externalConfig.enabled true }}
redisConfig:
additionalRedisConfig: {{ .Release.Name }}-ext-config
{{- end }}
redisExporter:
enabled: {{ .Values.redisExporter.enabled }}
image: "{{ .Values.redisExporter.image }}:{{ .Values.redisExporter.tag }}"
imagePullPolicy: "{{ .Values.redisExporter.imagePullPolicy }}"
resources:
{{ toYaml .Values.redisExporter.resources | indent 6 }}
{{- if .Values.redisExporter.env }}
env:
{{ toYaml .Values.redisExporter.env | indent 4 }}
{{- end }}
kubernetesConfig:
image: "{{ .Values.redisReplication.image }}:{{ .Values.redisReplication.tag }}"
imagePullPolicy: "{{ .Values.redisReplication.imagePullPolicy }}"
resources:
{{ toYaml .Values.redisReplication.resources | indent 6 }}
{{- if .Values.redisReplication.redisSecret }}
redisSecret:
name: "{{ .Values.redisReplication.redisSecret.secretName }}"
key: "{{ .Values.redisReplication.redisSecret.secretKey }}"
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range $imageSecrets := .Values.imagePullSecrets }}
- name: {{ $imageSecrets.name }}
{{- end }}
{{- end }}
{{- if .Values.storageSpec }}
storage:
{{ toYaml .Values.storageSpec | indent 4 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 4 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 4 }}
{{- end }}
{{- if .Values.podSecurityContext }}
podSecurityContext:
{{ toYaml .Values.podSecurityContext | indent 4 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 4 }}
{{- end }}
{{- if .Values.TLS }}
TLS:
{{ toYaml .Values.TLS | indent 4 }}
{{- end}}
{{- if .Values.sidecars }}
sidecars:
{{ toYaml .Values.sidecars | indent 4 }}
{{- end }}
{{- if .Values.serviceAccountName }}
serviceAccountName: "{{ .Values.serviceAccountName }}"
{{- end }}
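For orientation, here is a values sketch matching the keys this template consumes. The replication chart's own values.yaml is not shown in this diff, so the defaults below are assumptions mirrored from the redis-cluster chart:
```yaml
# assumed values layout for the redis-replication chart (sketch only)
redisReplication:
  clusterSize: 3
  image: quay.io/opstree/redis
  tag: v7.0.12
  imagePullPolicy: IfNotPresent
  resources: {}
redisExporter:
  enabled: false
  image: quay.io/opstree/redis-exporter
  tag: "v1.44.0"
  imagePullPolicy: IfNotPresent
  resources: {}
externalConfig:
  enabled: false
externalService:
  enabled: false
serviceMonitor:
  enabled: false
```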

View File

@ -0,0 +1,29 @@
{{- if eq .Values.externalService.enabled true }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-external-service
{{- if .Values.externalService.annotations }}
annotations:
{{ toYaml .Values.externalService.annotations | indent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
type: {{ .Values.externalService.serviceType }}
selector:
app: {{ .Release.Name }}
redis_setup_type: replication
role: replication
ports:
- protocol: TCP
port: {{ .Values.externalService.port }}
targetPort: 6379
name: client
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if eq .Values.serviceMonitor.enabled true }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ .Release.Name }}-prometheus-monitoring
labels:
app.kubernetes.io/name: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: middleware
spec:
selector:
matchLabels:
app: {{ .Release.Name }}
redis_setup_type: replication
role: replication
endpoints:
- port: redis-exporter
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
namespaceSelector:
matchNames:
- {{ .Values.serviceMonitor.namespace }}
{{- end }}

Some files were not shown because too many files have changed in this diff