Compare commits


3 Commits

Author SHA1 Message Date
litmusbot 9b310244a1 655363891: version upgraded for chaos-charts 2021-03-15 19:34:43 +00:00
Udit Gaurav 77b357656e
Cherry Pick for 1.13.2 (#427)
* charts

Signed-off-by: oumkale <oum.kale@mayadata.io>

* sock-shop workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* sock-shop workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* sock-shop workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* predefined workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* predefined workflow

Signed-off-by: oumkale <oum.kale@mayadata.io>

* fix root issue (#422)

Signed-off-by: oumkale <oum.kale@mayadata.io>

* update installation of experiment (#419)

Signed-off-by: oumkale <oum.kale@mayadata.io>

* chore(permissions): Adding minimal permissions in all experiments (#423)

* chore(permissions): Adding minimal permissions in all experiments

Signed-off-by: shubhamchaudhary <shubham@chaosnative.com>

* fix(script): convert combine experiments code to binary

Signed-off-by: shubhamchaudhary <shubham@chaosnative.com>

* disk-fill experiment has been added for sock-shop workflow (#420)

Signed-off-by: oumkale <oum.kale@mayadata.io>

* chore(env): adding EPHEMERAL_STORAGE_MEBIBYTES env in disk-fill (#424)

Signed-off-by: shubhamchaudhary <shubham@chaosnative.com>

* Chore(ec2): Update rbac permission and add managed nodegroup (#425)

Signed-off-by: udit <udit@chaosnative.com>

Co-authored-by: udit <udit@chaosnative.com>

* Cherry Pick for 1.13.2

Signed-off-by: udit <udit@chaosnative.com>

* update workflow image to 1.13.2

Signed-off-by: udit <udit@chaosnative.com>

* update workflow hub link to 1.13.2

Signed-off-by: udit <udit@chaosnative.com>

* Chore(cleanup): Remove unwanted files (#426)

Signed-off-by: udit <udit@chaosnative.com>

Co-authored-by: udit <udit@chaosnative.com>

* update workflow hub link to 1.13.2

Signed-off-by: udit <udit@chaosnative.com>

* update(workflows): Updating k8Probe schema inside workflows (#428)

Signed-off-by: shubhamchaudhary <shubham@chaosnative.com>
Signed-off-by: udit <udit@chaosnative.com>

Co-authored-by: oumkale <oum.kale@mayadata.io>
Co-authored-by: Shubham Chaudhary <shubham.chaudhary@mayadata.io>
Co-authored-by: udit <udit@chaosnative.com>
Co-authored-by: litmusbot <litmuschaos@gmail.com>
2021-03-16 01:04:26 +05:30
Udit Gaurav f60c99bfa3
Chore(v1.13.0): Update charts with version 1.13.0 (#415)
* Chore(v1.13.0): Update charts with version 1.13.0

Signed-off-by: udit <udit.gaurav@mayadata.io>

* Update version in workflows

Signed-off-by: udit <udit.gaurav@mayadata.io>
2021-02-16 01:11:54 +05:30
942 changed files with 141487 additions and 24360 deletions

.DS_Store (vendored): new binary file, not shown.

View File

@@ -16,7 +16,7 @@ jobs:
# Install golang
- uses: actions/setup-go@v2
with:
go-version: '1.14'
go-version: '^1.13.1'
# Setup gopath
- name: Setting up GOPATH
@@ -35,3 +35,4 @@ jobs:
export PATH=$PATH:$(go env GOPATH)/bin
cd ${GOPATH}/src/github.com/${{github.repository}}
make combineExpCR
shell: bash

View File

@@ -15,7 +15,7 @@ jobs:
# Install golang
- uses: actions/setup-go@v2
with:
go-version: '1.14'
go-version: '^1.13.1'
# Setup gopath
- name: Setting up GOPATH
@@ -35,9 +35,12 @@ jobs:
export PATH=$PATH:$(go env GOPATH)/bin
cd ${GOPATH}/src/github.com/${{github.repository}}
make combineExpCR
shell: bash
# Commit and push the changes from the chart directory
- name: Commit and Push the changes
run: |
cd ${GOPATH}/src/github.com/${{github.repository}}
make push
shell: bash

.gitignore (vendored): 215 lines deleted
View File

@@ -1,215 +0,0 @@
# Created by https://www.toptal.com/developers/gitignore/api/git,visualstudiocode,goland+all,jetbrains+all,macos
# Edit at https://www.toptal.com/developers/gitignore?templates=git,visualstudiocode,goland+all,jetbrains+all,macos
### Git ###
# Created by git for backups. To disable backups in Git:
# $ git config --global mergetool.keepBackup false
*.orig
# Created by git when using merge tools for conflicts
*.BACKUP.*
*.BASE.*
*.LOCAL.*
*.REMOTE.*
*_BACKUP_*.txt
*_BASE_*.txt
*_LOCAL_*.txt
*_REMOTE_*.txt
### GoLand+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### GoLand+all Patch ###
# Ignore everything but code style settings and run configurations
# that are supposed to be shared within teams.
.idea/*
!.idea/codeStyles
!.idea/runConfigurations
### JetBrains+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
# AWS User-specific
# Generated files
# Sensitive or high-churn files
# Gradle
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
# Mongo Explorer plugin
# File-based project format
# IntelliJ
# mpeltonen/sbt-idea plugin
# JIRA plugin
# Cursive Clojure plugin
# SonarLint plugin
# Crashlytics plugin (for Android Studio and IntelliJ)
# Editor-based Rest Client
# Android studio 3.1+ serialized cache file
### JetBrains+all Patch ###
# Ignore everything but code style settings and run configurations
# that are supposed to be shared within teams.
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### macOS Patch ###
# iCloud generated files
*.icloud
### VisualStudioCode ###
.vscode/
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide
# End of https://www.toptal.com/developers/gitignore/api/git,visualstudiocode,goland+all,jetbrains+all,macos

View File

@@ -10,6 +10,8 @@ Chaos Charts are groups of categorized chaos experiments, represented as custo
- <b>Generic</b>: It contains chaos to disrupt the state of kubernetes resources, e.g., pod-delete
- <b>OpenEBS</b>: It contains chaos to disrupt the state of OpenEBS control/data plane components, e.g., openebs-target-failure
- <b>Cassandra</b>: It contains chaos to disrupt the state of Cassandra applications, e.g., cassandra-pod-delete
- <b>Kafka</b>: It contains chaos to disrupt the state of Kafka applications, e.g., kafka-broker-pod-delete
- <b>Coredns</b>: It contains chaos to disrupt the state of the Coredns pod, e.g., coredns-pod-delete
- <b>Kube-AWS</b>: It contains chaos to disrupt the state of AWS resources running as part of the kubernetes cluster, e.g., ebs-loss
- <b>Kube-Components</b>: It contains chaos to disrupt the state of kubernetes components, e.g., k8-kube-proxy.

View File

@@ -17,7 +17,7 @@ versionmaker:
.PHONY: combineExpCR
combineExpCR:
@echo "--------Combining Experiments CR-------"
@cd scripts && go run ./combine-all-crs.go
./scripts/combine-experiments
.PHONY: push
push:
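
The `combineExpCR` target now invokes a prebuilt binary instead of `go run`, per the "fix(script): convert combine experiments code to binary" commit above. A rough, hypothetical sketch of what such a combine step amounts to (the actual `combine-experiments` source is not shown in this diff):

```bash
# Hypothetical sketch only; not the real combine-experiments binary.
# Concatenate every experiment.yaml under each chart into one experiments.yaml,
# separating the documents with the YAML "---" marker.
for chart in charts/*/; do
  find "$chart" -name experiment.yaml -exec sh -c 'cat "$1"; echo "---"' _ {} \; \
    > "${chart}experiments.yaml"
done
```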

README.md: 210 changed lines
View File

@@ -1,218 +1,34 @@
# Chaos-Charts
[![Slack Channel](https://img.shields.io/badge/Slack-Join-purple)](https://slack.litmuschaos.io)
![GitHub Workflow](https://github.com/litmuschaos/chaos-charts/actions/workflows/push.yml/badge.svg?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/litmuschaos/go-runner.svg)](https://hub.docker.com/r/litmuschaos/go-runner)
[![GitHub issues](https://img.shields.io/github/issues/litmuschaos/chaos-charts)](https://github.com/litmuschaos/chaos-charts/issues)
[![Twitter Follow](https://img.shields.io/twitter/follow/litmuschaos?style=social)](https://twitter.com/LitmusChaos)
[![YouTube Channel](https://img.shields.io/badge/YouTube-Subscribe-red)](https://www.youtube.com/channel/UCa57PMqmz_j0wnteRa9nCaw)
<br><br>
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Flitmuschaos%2Fchaos-charts.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Flitmuschaos%2Fchaos-charts?ref=badge_shield)
This repository hosts the Litmus Chaos Charts. A set of related chaos faults are bundled into a Chaos Chart. Chaos Charts are classified into the following categories.
This repository hosts the Litmus Chaos Charts.
- [Kubernetes Chaos](#kubernetes-chaos)
- [Application Chaos](#application-chaos)
- [Platform Chaos](#platform-chaos)
## Installation Steps for Chart Releases
### Kubernetes Chaos
*Note: Supported from release 1.1.0*
Chaos faults that apply to Kubernetes resources are classified in this category. The following chaos faults are supported for Kubernetes:
<table>
<tr>
<th> Fault Name </th>
<th> Description </th>
<th> Link </th>
</tr>
<tr>
<td> Container Kill </td>
<td> Kill one container in the application pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/container-kill"> container-kill </a></td>
<tr>
<tr>
<td> Disk Fill </td>
<td> Fill the Ephemeral Storage of the Pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/disk-fill"> disk-fill </a></td>
<tr>
<tr>
<td> Docker Service Kill </td>
<td> Kill docker service of the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/docker-service-kill"> docker-service-kill </a></td>
<tr>
<tr>
<td> Kubelet Service Kill </td>
<td> Kill kubelet service of the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/kubelet-service-kill"> kubelet-service-kill </a></td>
<tr>
<tr>
<td> Node CPU Hog </td>
<td> Stress the cpu of the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-cpu-hog"> node-cpu-hog </a></td>
<tr>
<tr>
<td> Node Drain </td>
<td> Drain the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-drain"> node-drain </a></td>
<tr>
<tr>
<td> Node IO Stress </td>
<td> Stress the IO of the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-io-stress"> node-io-stress </a></td>
<tr>
<tr>
<td> Node Memory Hog </td>
<td> Stress the memory of the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-memory-hog"> node-memory-hog </a></td>
<tr>
<tr>
<td> Node Restart </td>
<td> Restart the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-restart"> node-restart </a></td>
<tr>
<tr>
<td> Node Taint </td>
<td> Taint the target node </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/node-taint"> node-taint </a></td>
<tr>
<tr>
<td> Pod Autoscaler </td>
<td> Scale the replicas of the target application </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-autoscaler"> pod-autoscaler </a></td>
<tr>
<tr>
<td> Pod CPU Hog </td>
<td> Stress the CPU of the target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-cpu-hog"> pod-cpu-hog </a></td>
<tr>
<tr>
<td> Pod Delete </td>
<td> Delete the target pods </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-delete"> pod-delete </a></td>
<tr>
<tr>
<td> Pod DNS Spoof </td>
<td> Spoof dns requests to desired target hostnames </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-dns-spoof"> pod-dns-spoof </a></td>
<tr>
<tr>
<td> Pod DNS Error </td>
<td> Error the dns requests of the target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-dns-error"> pod-dns-error </a></td>
<tr>
<tr>
<td> Pod IO Stress </td>
<td> Stress the IO of the target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-io-stress"> pod-io-stress </a></td>
<tr>
<tr>
<td> Pod Memory Hog </td>
<td> Stress the memory of the target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-memory-hog"> pod-memory-hog </a></td>
<tr>
<tr>
<td> Pod Network Latency </td>
<td> Induce the network latency in target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-network-latency"> pod-network-latency </a></td>
<tr>
<tr>
<td> Pod Network Corruption </td>
<td> Induce the network packet corruption in target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-network-corruption"> pod-network-corruption </a></td>
<tr>
<tr>
<td> Pod Network Duplication </td>
<td> Induce the network packet duplication in target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-network-duplication"> pod-network-duplication </a></td>
<tr>
<tr>
<td> Pod Network Loss </td>
<td> Induce the network loss in target pod </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-network-loss"> pod-network-loss </a></td>
<tr>
<tr>
<td> Pod Network Partition </td>
<td> Disrupt network connectivity to kubernetes pods </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/kubernetes/pod-network-partition"> pod-network-partition </a></td>
<tr>
</table>
### Application Chaos
While chaos faults under the Kubernetes category offer the ability to induce chaos into Kubernetes resources, it is difficult to analyze and conclude whether the induced chaos found a weakness in a given application. The application-specific chaos faults are built with checks on *pre-conditions* and expected outcomes after the chaos injection; the result of a chaos fault is determined by matching the actual outcome against the expected outcome (a probe sketch follows the table below).
<table>
<tr>
<th> Fault Category </th>
<th> Description </th>
<th> Link </th>
</tr>
<tr>
<td> Spring Boot Faults </td>
<td> Injects faults in Spring Boot applications </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/spring-boot"> Spring Boot Faults</a></td>
<tr>
</table>
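
These pre-condition and outcome checks are declared as probes on the ChaosEngine. Below is a minimal `k8sProbe` sketch; this is the schema being updated by the "Updating k8Probe schema" commit above, and exact field placement varied across 1.x releases, so treat every field here as illustrative:

```yaml
# illustrative probe attached to an experiment inside a ChaosEngine spec;
# field names follow 1.x-era docs and may differ between releases
probe:
  - name: check-frontend-pods-running
    type: k8sProbe
    k8sProbe/inputs:
      group: ""
      version: v1
      resource: pods
      namespace: sock-shop
      fieldSelector: status.phase=Running
      operation: present       # assumed placement; some releases nest this differently
    mode: EOT                  # evaluate at end of test
    runProperties:
      probeTimeout: 5
      interval: 2
      retry: 1
```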
### Platform Chaos
Chaos faults that inject chaos into platform and infrastructure resources are classified into this category. Since the management of platform resources varies significantly across providers, Chaos Charts may be maintained separately for each platform (for example: AWS, GCP, Azure, VMware, etc.).
The following chaos faults are classified in this category:
<table>
<tr>
<th> Fault Category </th>
<th> Description </th>
<th> Link </th>
</tr>
<tr>
<td> AWS Faults </td>
<td> AWS Platform specific chaos </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/aws"> AWS Faults </a></td>
<tr>
<tr>
<td> Azure Faults </td>
<td> Azure Platform specific chaos </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/azure"> Azure Faults </a></td>
<tr>
<tr>
<td> GCP Faults </td>
<td> GCP Platform specific chaos </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/gcp"> GCP Faults </a></td>
<tr>
<tr>
<td> VMWare Faults </td>
<td> VMWare Platform specific chaos </td>
<td> <a href="https://github.com/litmuschaos/chaos-charts/tree/master/faults/vmware"> VMWare Faults </a></td>
<tr>
</table>
## Installation Steps for Chart Releases
*Note: Supported from release 3.0.0*
- To install the chaos faults from a specific chart for a given release, execute the following commands
- To install the chaos experiments from a specific chart for a given release, execute the following commands
with the desired `<release_version>`, `<chart_name>` & `<namespace>`
```bash
## downloads and unzips the released source
tar -zxvf <(curl -sL https://github.com/litmuschaos/chaos-charts/archive/<release_version>.tar.gz)
## installs the chaosexperiment resources
find chaos-charts-<release_version> -name experiments.yaml | grep <chart-name> | xargs kubectl apply -n <namespace> -f
```
- For example, to install the *Kubernetes* fault chart bundle for release *3.0.0*, in the *sock-shop* namespace, run:
```
- For example, to install the *generic* experiment chart bundle for release *1.1.0*, in the *sock-shop* namespace, run:
```bash
tar -zxvf <(curl -sL https://github.com/litmuschaos/chaos-charts/archive/3.0.0.tar.gz)
find chaos-charts-3.0.0 -name experiments.yaml | grep kubernetes | xargs kubectl apply -n sock-shop -f
tar -zxvf <(curl -sL https://github.com/litmuschaos/chaos-charts/archive/1.1.0.tar.gz)
find chaos-charts-1.1.0 -name experiments.yaml | grep generic | xargs kubectl apply -n sock-shop -f
```
- If you would like to install a specific fault, replace the `experiments.yaml` in the above command with the relative path of the fault manifest within the parent chart. For example, to install only the *pod-delete* fault, run:
- If you would like to install a specific experiment, replace the `experiments.yaml` in the above command with the relative
path of the experiment manifest within the parent chart. For example, to install only the *pod-delete* experiment, run:
```bash
find chaos-charts-3.0.0 -name fault.yaml | grep 'kubernetes/pod-delete' | xargs kubectl apply -n sock-shop -f
find chaos-charts-1.1.0 -name experiment.yaml | grep 'generic/pod-delete' | xargs kubectl apply -n sock-shop -f
```
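- To verify an installation, list the resulting custom resources (illustrative commands; `ChaosExperiment` is the CRD kind created by the manifests above):
```bash
## the applied manifests create ChaosExperiment custom resources
kubectl get chaosexperiments -n sock-shop
## inspect a single experiment definition
kubectl describe chaosexperiment pod-delete -n sock-shop
```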

View File

@@ -0,0 +1,57 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: cassandra-chaos
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=cassandra'
appkind: 'statefulset'
# It can be true/false
annotationCheck: 'true'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: cassandra-pod-delete-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: cassandra-pod-delete
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '15'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '15'
# pod failures without '--force' & default terminationGracePeriodSeconds
- name: FORCE
value: 'false'
# provide cassandra service name
# default service: cassandra
- name: CASSANDRA_SVC_NAME
value: 'cassandra'
# provide the keyspace replication factor
- name: KEYSPACE_REPLICATION_FACTOR
value: '3'
# provide cassandra port
# default port: 9042
- name: CASSANDRA_PORT
value: '9042'
# SET THE CASSANDRA_LIVENESS_CHECK
# IT CAN BE `enabled` OR `disabled`
- name: CASSANDRA_LIVENESS_CHECK
value: ''
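
A sketch of running this engine end to end; the file names are placeholders for the manifests in this chart, and the ChaosResult name assumes the usual `<engine-name>-<experiment-name>` convention:

```bash
# apply the experiment CR, the RBAC manifests, and then the engine (placeholder file names)
kubectl apply -f experiment.yaml -f rbac.yaml -f engine.yaml
# watch the run and read the verdict from the ChaosResult
kubectl get pods -n default -w
kubectl describe chaosresult cassandra-chaos-cassandra-pod-delete -n default
```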

View File

@@ -0,0 +1,115 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a cassandra statefulset
kind: ChaosExperiment
metadata:
name: cassandra-pod-delete
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "deployments"
- "statefulsets"
- "jobs"
- "pods"
- "pods/log"
- "pods/exec"
- "services"
- "events"
- "configmaps"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "get"
- "list"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/cassandra/pod-delete/pod_delete_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
# provide cassandra service name
# default service: cassandra
- name: CASSANDRA_SVC_NAME
value: ''
# provide the keyspace replication factor
- name: KEYSPACE_REPLICATION_FACTOR
value: ''
# provide cassandra port
# default port: 9042
- name: CASSANDRA_PORT
value: ''
# provide the liveness deployment port
# default port: 8088 (only port 8088 is supported)
- name: LIVENESS_SVC_PORT
value: '8088'
# provide cassandra liveness image
- name: CASSANDRA_LIVENESS_IMAGE
value: 'litmuschaos/cassandra-client:latest'
- name: TOTAL_CHAOS_DURATION
value: '15'
- name: CHAOS_INTERVAL
value: '15'
# SET THE CASSANDRA_LIVENESS_CHECK
# IT CAN BE `enabled` OR `disabled`
- name: CASSANDRA_LIVENESS_CHECK
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
# provide the kill count
- name: KILL_COUNT
value: ''
- name: FORCE
value: ''
## env var that describes the library used to execute the chaos
## default: litmus. Supported values: litmus, powerfulseal
- name: LIB
value: ''
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

View File

@@ -0,0 +1,38 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cassandra-pod-delete-sa
labels:
name: cassandra-pod-delete-sa
rules:
- apiGroups: ["","litmuschaos.io","batch","apps"]
resources: ["pods","deployments","statefulsets","pods/log","pods/exec","services","events","jobs","configmaps","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cassandra-pod-delete-sa
labels:
name: cassandra-pod-delete-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cassandra-pod-delete-sa
subjects:
- kind: ServiceAccount
name: cassandra-pod-delete-sa
namespace: default

View File

@@ -0,0 +1,37 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
rules:
- apiGroups: ["","litmuschaos.io","batch","apps"]
resources: ["pods","deployments","statefulsets","services","pods/log","pods/exec","events","jobs","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cassandra-pod-delete-sa
subjects:
- kind: ServiceAccount
name: cassandra-pod-delete-sa
namespace: default
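
The Role/RoleBinding variant above confines the same permissions to a single namespace, unlike the ClusterRole version earlier. A quick way to check what the service account is actually allowed to do (standard kubectl, shown as an example):

```bash
# impersonate the service account and probe a permission it should have
kubectl auth can-i delete pods -n default \
  --as=system:serviceaccount:default:cassandra-pod-delete-sa
```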

View File

@@ -0,0 +1,46 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2020-04-14T10:28:08Z
name: cassandra-pod-delete
version: 0.1.3
annotations:
categories: Cassandra
vendor: CNCF
support: https://cassandra-slack.herokuapp.com
spec:
displayName: cassandra-pod-delete
categoryDescription: |
This experiment causes (forced/graceful) pod failure of random replicas of a cassandra statefulset.
It tests statefulset sanity (replica availability & uninterrupted service) and recovery workflows of the cassandra pod.
keywords:
- Kubernetes
- Cassandra
- Pod
platforms:
- GKE
- Konvoy
- Packet(Kubeadm)
- Minikube
- EKS
maturity: alpha
maintainers:
- name: Shubham Chaudhary
email: shubham.chaudhary@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-ansible/tree/master/experiments/cassandra/pod-delete
- name: Documentation
url: https://docs.litmuschaos.io/docs/cassandra-pod-delete/
- name: Video
url:
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/cassandra/cassandra-pod-delete/experiment.yaml

View File

@@ -0,0 +1,57 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: cassandra-chaos
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=cassandra'
appkind: 'statefulset'
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: cassandra-pod-delete-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: cassandra-pod-delete
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '15'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '15'
# pod failures without '--force' & default terminationGracePeriodSeconds
- name: FORCE
value: 'false'
# provide cassandra service name
# default service: cassandra
- name: CASSANDRA_SVC_NAME
value: 'cassandra'
# provide the keyspace replication factor
- name: KEYSPACE_REPLICATION_FACTOR
value: '3'
# provide cassandra port
# default port: 9042
- name: CASSANDRA_PORT
value: '9042'
# SET THE CASSANDRA_LIVENESS_CHECK
# IT CAN BE `enabled` OR `disabled`
- name: CASSANDRA_LIVENESS_CHECK
value: ''

View File

@@ -0,0 +1,110 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a cassandra statefulset
kind: ChaosExperiment
metadata:
name: cassandra-pod-delete
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "deployments"
- "statefulsets"
- "jobs"
- "pods"
- "pods/log"
- "pods/exec"
- "services"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:1.13.2"
imagePullPolicy: Always
args:
- -c
- ./experiments -name cassandra-pod-delete
command:
- /bin/bash
env:
# provide cassandra service name
# default service: cassandra
- name: CASSANDRA_SVC_NAME
value: ''
# provide the keyspace replication factor
- name: KEYSPACE_REPLICATION_FACTOR
value: ''
# provide cassandra port
# default port: 9042
- name: CASSANDRA_PORT
value: '9042'
# provide the liveness deployment port
# default port: 8088 (only port 8088 is supported)
- name: LIVENESS_SVC_PORT
value: '8088'
# provide cassandra liveness image
- name: CASSANDRA_LIVENESS_IMAGE
value: 'litmuschaos/cassandra-client:latest'
- name: TOTAL_CHAOS_DURATION
value: '15'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
- name: CHAOS_INTERVAL
value: '15'
# SET THE CASSANDRA_LIVENESS_CHECK
# IT CAN BE `enabled` OR `disabled`
- name: CASSANDRA_LIVENESS_CHECK
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
- name: FORCE
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
## env var that describes the library used to execute the chaos
## default: litmus. Supported values: litmus
- name: LIB
value: 'litmus'
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

View File

@@ -0,0 +1,52 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events","services"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log"]
verbs: ["create","list","get"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cassandra-pod-delete-sa
namespace: default
labels:
name: cassandra-pod-delete-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cassandra-pod-delete-sa
subjects:
- kind: ServiceAccount
name: cassandra-pod-delete-sa
namespace: default

View File

@@ -0,0 +1,43 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2020-04-14T10:28:08Z
name: cassandra
version: 0.1.3
annotations:
categories: Cassandra
chartDescription: Injects cassandra kubernetes chaos
spec:
displayName: Cassandra Chaos
categoryDescription: >
Cassandra is a free and open-source, distributed, wide column store, NoSQL database management system designed to handle
large amounts of data across many commodity servers, providing high availability with no single point of failure.
This chart installs all the experiments that can be used to inject chaos into cassandra applications.
experiments:
- cassandra-pod-delete
keywords:
- Kubernetes
- Pod
- Cassandra
- Statefulset
maintainers:
- name: ksatchit
email: karthik.s@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
links:
- name: Kubernetes Website
url: https://kubernetes.io
- name: Cassandra Website
url: http://cassandra.apache.org/
- name: Source Code
url: https://github.com/apache/cassandra
- name: Cassandra Slack
url: https://cassandra-slack.herokuapp.com
- name: Documentation
url: http://cassandra.apache.org/doc/latest/
icon:
- url: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/cassandra/icons/cassandra.png
mediatype: image/png
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/cassandra/experiments.yaml

View File

@@ -0,0 +1,5 @@
packageName: cassandra
experiments:
- name: cassandra-pod-delete
CSV: cassandra-pod-delete.chartserviceversion.yaml
desc: "cassandra-pod-delete"

View File

@@ -0,0 +1,112 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a cassandra statefulset
kind: ChaosExperiment
metadata:
name: cassandra-pod-delete
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "deployments"
- "statefulsets"
- "jobs"
- "pods"
- "pods/log"
- "pods/exec"
- "services"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:1.13.2"
imagePullPolicy: Always
args:
- -c
- ./experiments -name cassandra-pod-delete
command:
- /bin/bash
env:
# provide cassandra service name
# default service: cassandra
- name: CASSANDRA_SVC_NAME
value: ''
# provide the keyspace replication factor
- name: KEYSPACE_REPLICATION_FACTOR
value: ''
# provide cassandra port
# default port: 9042
- name: CASSANDRA_PORT
value: '9042'
# provide the liveness deployment port
# default port: 8088 (only port 8088 is supported)
- name: LIVENESS_SVC_PORT
value: '8088'
# provide cassandra liveness image
- name: CASSANDRA_LIVENESS_IMAGE
value: 'litmuschaos/cassandra-client:latest'
- name: TOTAL_CHAOS_DURATION
value: '15'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
- name: CHAOS_INTERVAL
value: '15'
# SET THE CASSANDRA_LIVENESS_CHECK
# IT CAN BE `enabled` OR `disabled`
- name: CASSANDRA_LIVENESS_CHECK
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
- name: FORCE
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
## env var that describes the library used to execute the chaos
## default: litmus. Supported values: litmus
- name: LIB
value: 'litmus'
labels:
name: cassandra-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0
---

View File

Binary image file changed (959 B before and after), not shown.

Binary image file added (28 KiB), not shown.

View File

@@ -0,0 +1,41 @@
apiVersion: litmuchaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-12-30T10:28:08Z
name: coredns-pod-delete
version: 0.1.7
annotations:
categories: CoreDNS
vendor: CNCF
support: https://slack.cncf.io/
spec:
displayName: coredns-pod-delete
categoryDescription: |
Pod delete contains chaos to disrupt the state of coredns resources. Experiments can inject random pod delete failures against the specified application.
- Causes (forced/graceful) pod failure of coredns replicas of an application deployment.
- Tests successful resolution of app services during chaos injection.
keywords:
- Kubernetes
- CoreDNS
- Pod delete
platforms:
- GKE
- Minikube
maturity: alpha
maintainers:
- name: Raj Babu Das
email: raj.das@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-ansible/tree/master/experiments/coredns/pod_delete
- name: Documentation
url: https://docs.litmuschaos.io/docs/coredns-pod-delete/
- name: Video
url: https://www.youtube.com/watch?v=pwo5idKW7q8
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/coredns/coredns-pod-delete/experiment.yaml

View File

@@ -0,0 +1,35 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: engine-coredns
namespace: kube-system
spec:
appinfo:
appns: 'kube-system'
applabel: 'k8s-app=kube-dns'
appkind: 'deployment'
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: coredns-pod-delete-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: coredns-pod-delete
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '10'
- name: CHAOS_NAMESPACE
value: 'kube-system'

View File

@@ -0,0 +1,72 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes coredns pod in kube-system namespace
kind: ChaosExperiment
metadata:
name: coredns-pod-delete
labels:
name: coredns-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "services"
- "jobs"
- "pods"
- "pods/log"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "get"
- "list"
- "patch"
- "create"
- "update"
- "delete"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/coredns/pod_delete/pod_delete_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TOTAL_CHAOS_DURATION
value: '15'
# provide the kill count
- name: KILL_COUNT
value: ''
- name: FORCE
value: 'true'
- name: CHAOS_INTERVAL
value: '5'
#supported libs are litmus and powerfulseal
- name: LIB
value: 'litmus'
- name: LIB_IMAGE
value: 'litmuschaos/pod-delete-helper:latest'
labels:
name: coredns-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-pod-delete-sa
namespace: kube-system
labels:
name: coredns-pod-delete-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: coredns-pod-delete-sa
labels:
name: coredns-pod-delete-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","litmuschaos.io","batch"]
resources: ["services", "pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: coredns-pod-delete-sa
labels:
name: coredns-pod-delete-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-pod-delete-sa
subjects:
- kind: ServiceAccount
name: coredns-pod-delete-sa
namespace: kube-system

View File

@@ -0,0 +1,43 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-12-30T10:28:08Z
name: coredns
version: 0.1.4
annotations:
categories: Kubernetes
chartDescription: Injects coredns chaos
spec:
displayName: CoreDNS chaos
categoryDescription: >
CoreDNS is an open-source system for DNS management of containerized applications.
It helps to resolve services to IP addresses. This chart installs all the experiments that can be used to inject chaos into containerized applications.
experiments:
- coredns-pod-delete
keywords:
- Kubernetes
- Container
- node
- pod
- DNS
maintainers:
- name: Raj Babu Das
email: raj.das@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
links:
- name: CoreDNS Website
url: https://coredns.io
- name: Source Code
url: https://github.com/coredns/coredns
- name: CoreDNS Slack
url: https://slack.cncf.io/
- name: Documentation
url: https://coredns.io/manual/toc/
icon:
- url: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/coredns/icons/coredns-pod-delete.png
mediatype: image/png
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/coredns/experiments.yaml

View File

@@ -0,0 +1,5 @@
packageName: coredns
experiments:
- name: coredns-pod-delete
CSV: coredns-pod-delete.chartserviceversion.yaml
desc: "coredns pod delete experiment"

View File

@@ -0,0 +1,74 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes coredns pod in kube-system namespace
kind: ChaosExperiment
metadata:
name: coredns-pod-delete
labels:
name: coredns-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "services"
- "jobs"
- "pods"
- "pods/log"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "get"
- "list"
- "patch"
- "create"
- "update"
- "delete"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/coredns/pod_delete/pod_delete_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TOTAL_CHAOS_DURATION
value: '15'
# provide the kill count
- name: KILL_COUNT
value: ''
- name: FORCE
value: 'true'
- name: CHAOS_INTERVAL
value: '5'
#supported libs are litmus and powerfulseal
- name: LIB
value: 'litmus'
- name: LIB_IMAGE
value: 'litmuschaos/pod-delete-helper:latest'
labels:
name: coredns-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0
---

Binary image file added (20 KiB), not shown.

Binary image file added (20 KiB), not shown.

View File

@@ -0,0 +1,37 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be true/false
annotationCheck: 'true'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: container-kill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: container-kill
spec:
components:
env:
# specify the name of the container to be killed
- name: TARGET_CONTAINER
value: 'nginx'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'

View File

@@ -0,0 +1,83 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: "Kills a container belonging to an application pod \n"
kind: ChaosExperiment
metadata:
name: container-kill
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/log"
- "events"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "update"
- "patch"
- "delete"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/generic/container_kill/container_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TARGET_CONTAINER
value: ''
# Period to wait before injection of chaos in sec
- name: RAMP_TIME
value: ''
# It supports pumba and containerd
- name: LIB
value: 'pumba'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the container runtime path for containerd
# applicable only for containerd runtime
- name: CONTAINER_PATH
value: '/run/containerd/containerd.sock'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
# LIB_IMAGE can be - gaiaadm/pumba:0.6.5, litmuschaos/container-kill-helper:latest
# For pumba image use: gaiaadm/pumba:0.6.5
# For containerd image use: litmuschaos/container-kill-helper:latest
- name: LIB_IMAGE
value: 'gaiaadm/pumba:0.6.5'
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

View File

@@ -0,0 +1,40 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","litmuschaos.io","batch","apps"]
resources: ["pods","jobs","pods/exec","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: container-kill-sa
subjects:
- kind: ServiceAccount
name: container-kill-sa
namespace: default

View File

@@ -0,0 +1,46 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-10-15T10:28:08Z
name: container-kill
version: 0.1.13
annotations:
categories: "Kubernetes"
vendor: "CNCF"
support: https://slack.openebs.io/
spec:
displayName: container-kill
categoryDescription: |
Container kill contains chaos to disrupt the state of kubernetes resources. Experiments can inject random container delete failures against the specified application.
- Executes SIGKILL on containers of random replicas of an application deployment.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
keywords:
- Kubernetes
- Container
platforms:
- GKE
- Minikube
- Packet(Kubeadm)
- EKS
- AKS
maturity: alpha
maintainers:
- name: ksatchit
email: karthik.s@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-go/tree/master/experiments/generic/container-kill
- name: Documentation
url: https://docs.litmuschaos.io/docs/container-kill/
- name: Video
url: https://www.youtube.com/watch?v=XKyMNdVsKMo
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/container-kill/experiment.yaml

View File

@@ -1,41 +1,41 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
appinfo:
appns: ''
applabel: ''
appkind: ''
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: container-kill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: container-kill
spec:
components:
env:
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
# provide the name of container runtime
# it supports docker, containerd, crio
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: 'containerd'
value: 'docker'
# provide the socket file path
- name: SOCKET_PATH
value: '/run/containerd/containerd.sock'
- name: PODS_AFFECTED_PERC
value: ''
- name: TARGET_CONTAINER
value: ''
value: '/var/run/docker.sock'
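
The hunk above flips the default runtime pairing from containerd to docker. On clusters running containerd, the removed values would be used together again (both taken from the removed side of this hunk):

```yaml
# containerd pairing (values from the removed lines of the hunk above)
- name: CONTAINER_RUNTIME
  value: 'containerd'
- name: SOCKET_PATH
  value: '/run/containerd/containerd.sock'
```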

View File

@@ -0,0 +1,50 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: nginx
spec:
# It can be true/false
annotationCheck: 'true'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: 'nginx'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: container-kill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: container-kill
spec:
components:
env:
# specify the name of the container to be killed
- name: TARGET_CONTAINER
value: 'nginx'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
- name: LIB_IMAGE
value: 'litmuschaos/go-runner:1.13.2'
# provide the name of container runtime
# it supports docker, containerd, crio
# default to docker
- name: CONTAINER_RUNTIME
value: 'docker'
# provide the container runtime path for containerd
# applicable only for containerd runtime
- name: CONTAINER_PATH
value: '/run/containerd/containerd.sock'

View File

@@ -0,0 +1,107 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: "Kills a container belonging to an application pod \n"
kind: ChaosExperiment
metadata:
name: container-kill
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.2
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "apps.openshift.io"
- "argoproj.io"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/log"
- "events"
- "replicationcontrollers"
- "deployments"
- "statefulsets"
- "daemonsets"
- "replicasets"
- "deploymentconfigs"
- "rollouts"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "update"
- "patch"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:1.13.2"
imagePullPolicy: Always
args:
- -c
- ./experiments -name container-kill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ''
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# lib can be litmus or pumba
- name: LIB
value: 'litmus'
- name: TARGET_PODS
value: ''
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
- name: SIGNAL
value: 'SIGKILL'
# provide the socket file path
- name: SOCKET_PATH
value: '/var/run/docker.sock'
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: 'docker'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
- name: LIB_IMAGE
value: 'litmuschaos/go-runner:1.13.2'
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.2

View File

@@ -0,0 +1,62 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log","replicationcontrollers"]
verbs: ["list","get","create"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets","daemonsets","replicasets"]
verbs: ["list","get"]
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list","get"]
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames: ["litmus"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: container-kill-sa
subjects:
- kind: ServiceAccount
name: container-kill-sa
namespace: default

View File

@@ -0,0 +1,58 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log","replicationcontrollers"]
verbs: ["list","get","create"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets","daemonsets","replicasets"]
verbs: ["list","get"]
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list","get"]
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: container-kill-sa
namespace: default
labels:
name: container-kill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: container-kill-sa
subjects:
- kind: ServiceAccount
name: container-kill-sa
namespace: default

View File

@@ -0,0 +1,55 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: container-kill-sa
namespace: nginx
labels:
name: container-kill-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: container-kill-sa
namespace: nginx
labels:
name: container-kill-sa
rules:
- apiGroups: [""]
resources: ["pods","events"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log","replicationcontrollers"]
verbs: ["list","get","create"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets","daemonsets","replicasets"]
verbs: ["list","get"]
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list","get"]
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: container-kill-sa
namespace: nginx
labels:
name: container-kill-sa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: container-kill-sa
subjects:
- kind: ServiceAccount
name: container-kill-sa
namespace: nginx

View File

@@ -0,0 +1,32 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: disk-fill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: disk-fill
spec:
components:
env:
# specify the fill percentage according to the disk pressure required
- name: FILL_PERCENTAGE
value: '80'
- name: TARGET_CONTAINER
value: 'nginx'
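
`FILL_PERCENTAGE` is evaluated against the target container's ephemeral-storage limit, so the fill only has meaning when the application pod declares one. An assumed fragment of the target pod spec (not part of this chart):

```yaml
# assumed fragment of the target application's container spec
resources:
  limits:
    ephemeral-storage: "1Gi"     # disk-fill fills up to FILL_PERCENTAGE of this limit
  requests:
    ephemeral-storage: "500Mi"
```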

View File

@@ -0,0 +1,77 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Fill up the Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/exec"
- "pods/log"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/generic/disk_fill/disk_fill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TARGET_CONTAINER
value: ''
- name: FILL_PERCENTAGE
value: '80'
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before injection of chaos in sec
- name: RAMP_TIME
value: ''
# Provide the LIB here
# Only litmus supported
- name: LIB
value: 'litmus'
# Provide the container runtime path
# Default set to docker
- name: CONTAINER_PATH
value: '/var/lib/docker/containers'
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

@ -0,0 +1,37 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: disk-fill-sa
namespace: default
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","apps","litmuschaos.io","batch"]
resources: ["pods","jobs","pods/exec","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: disk-fill-sa
subjects:
- kind: ServiceAccount
name: disk-fill-sa
namespace: default

@ -0,0 +1,47 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-11-11T10:28:08Z
name: disk-fill
version: 0.0.12
annotations:
categories: Kubernetes
vendor: CNCF
support: https://slack.kubernetes.io/
spec:
displayName: disk-fill
categoryDescription: |
Disk fill contains chaos to disrupt the state of Kubernetes resources.
- Causes (forced/graceful) Disk Stress by filling up the Ephemeral Storage of the Pod using one of its containers.
- Causes the Pod to get Evicted if it exceeds its Ephemeral Storage Limit.
- Tests the Ephemeral Storage Limits, to ensure those parameters are sufficient.
keywords:
- Kubernetes
- Disk
- State
platforms:
- GKE
- EKS
- AKS
maturity: alpha
chaosType: infra
maintainers:
- name: ksatchit
email: karthik.s@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-go/tree/master/experiments/generic/disk-fill
- name: Documentation
url: https://docs.litmuschaos.io/docs/disk-fill/
- name: Video
url: https://www.youtube.com/watch?v=pbok737rUPQ
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/disk-fill/experiment.yaml
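The experiment CR can be applied straight from the chaosexpcrdlink above (a sketch, assuming kubectl points at the target cluster):

kubectl apply -f https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/disk-fill/experiment.yaml -n default
kubectl get chaosexperiment disk-fill -n default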

@ -0,0 +1,32 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: disk-fill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: disk-fill
spec:
components:
env:
# specify the fill percentage according to the disk pressure required
- name: FILL_PERCENTAGE
value: '80'
- name: TARGET_CONTAINER
value: 'nginx'
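Running the engine above and following the outcome (a sketch; litmus conventionally names the result <engine-name>-<experiment-name>, here nginx-chaos-disk-fill):

kubectl apply -f engine.yaml
kubectl get pods -w -n default
kubectl describe chaosresult nginx-chaos-disk-fill -n default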

@ -0,0 +1,102 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Fill up the Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.2
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "apps.openshift.io"
- "argoproj.io"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/exec"
- "pods/log"
- "replicationcontrollers"
- "deployments"
- "statefulsets"
- "daemonsets"
- "replicasets"
- "deploymentconfigs"
- "rollouts"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:1.13.2"
imagePullPolicy: Always
args:
- -c
- ./experiments -name disk-fill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ''
- name: FILL_PERCENTAGE
value: '80'
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Provide the LIB here
# Only litmus supported
- name: LIB
value: 'litmus'
- name: TARGET_PODS
value: ''
- name: EPHEMERAL_STORAGE_MEBIBYTES
value: ''
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
- name: LIB_IMAGE
value: 'litmuschaos/go-runner:1.13.2'
# Provide the container runtime path
# Default set to docker container path
- name: CONTAINER_PATH
value: '/var/lib/docker/containers'
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.2
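FILL_PERCENTAGE is typically computed against the container's ephemeral-storage limit, so when no limit is set the absolute-size env above is the alternative. A sketch of the relevant ChaosEngine fragment, with purely illustrative values:

  experiments:
    - name: disk-fill
      spec:
        components:
          env:
            # fill a fixed 256 MiB instead of a percentage of the limit
            - name: EPHEMERAL_STORAGE_MEBIBYTES
              value: '256'
            # target half of the matching pods, one at a time
            - name: PODS_AFFECTED_PERC
              value: '50'
            - name: SEQUENCE
              value: 'serial'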

@ -0,0 +1,59 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: disk-fill-sa
namespace: default
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log","replicationcontrollers"]
verbs: ["list","get","create"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets","daemonsets","replicasets"]
verbs: ["list","get"]
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list","get"]
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames: ["litmus"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: disk-fill-sa
subjects:
- kind: ServiceAccount
name: disk-fill-sa
namespace: default
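The policy rule at the end assumes a PodSecurityPolicy named litmus already exists in the cluster; a quick sanity check (sketch):

kubectl get psp litmus
kubectl auth can-i use podsecuritypolicies/litmus --as=system:serviceaccount:default:disk-fill-sa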

@ -0,0 +1,55 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: disk-fill-sa
namespace: default
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log","replicationcontrollers"]
verbs: ["list","get","create"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["apps"]
resources: ["deployments","statefulsets","daemonsets","replicasets"]
verbs: ["list","get"]
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list","get"]
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list","get"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: disk-fill-sa
labels:
name: disk-fill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: disk-fill-sa
subjects:
- kind: ServiceAccount
name: disk-fill-sa
namespace: default

@ -0,0 +1,46 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-11-13T10:28:08Z
name: disk-loss
version: 0.1.11
annotations:
categories: Kubernetes
vendor: Mayadata
support: https://app.slack.com/client/T09NY5SBT/CNXNB0ZTN
spec:
displayName: disk-loss
categoryDescription: |
Disk Loss contains chaos to disrupt the state of infra resources. Experiments can inject disk loss against a specified application.
- Causes (forced/graceful) disk loss from the node/instance.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
keywords:
- Kubernetes
- AWS
- GCP
- Disk
platforms:
- GKE
- AWS(KOPS)
maturity: alpha
chaosType: infra
maintainers:
- name: Raj Babu Das
email: raj.das@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-ansible/tree/master/experiments/generic/disk_loss
- name: Documentation
url: https://docs.litmuschaos.io/docs/disk-loss/
- name: Video
url:
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/disk-loss/experiment.yaml

@ -0,0 +1,57 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
chaosServiceAccount: disk-loss-sa
monitoring: false
# It can be retain/delete
jobCleanUpPolicy: 'delete'
experiments:
- name: disk-loss
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '60'
# set cloud platform name
- name: CLOUD_PLATFORM
value: 'GKE'
# set app_check to check application state
- name: APP_CHECK
value: 'true'
# GCP project ID
- name: PROJECT_ID
value: 'litmus-demo-123'
# Node name of the cluster
- name: NODE_NAME
value: 'demo-node-123'
# Disk Name of the node, it must be an external disk.
- name: DISK_NAME
value: 'demo-disk-123'
# Enter the device name you want to mount, only for AWS.
- name: DEVICE_NAME
value: '/dev/sdb'
# Name of Zone in which node is present (GCP)
# Use Region Name when running with AWS (ex: us-central1)
- name: ZONE_NAME
value: 'us-central1-a'

@ -0,0 +1,89 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaches a persistent disk from a node/instance. Supported only on AWS and GCP.
kind: ChaosExperiment
metadata:
name: disk-loss
labels:
name: disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/ansible-runner:1.13.0"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/generic/disk_loss/disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TOTAL_CHAOS_DURATION
value: '15'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: APP_CHECK
value: 'true'
# GKE and AWS supported
- name: CLOUD_PLATFORM
value: 'GKE'
- name: PROJECT_ID
value: ''
- name: NODE_NAME
value: ''
- name: DISK_NAME
value: ''
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
- name: ZONE_NAME
value: ''
- name: DEVICE_NAME
value: ''
labels:
name: disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0
secrets:
- name: cloud-secret
mountPath: /tmp/
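The secrets stanza above expects cloud credentials in a Secret named cloud-secret, mounted under /tmp/. A sketch of creating it; the key file name cloud_config.yml is an assumption here, so check the experiment docs for the exact name your platform expects:

kubectl create secret generic cloud-secret --from-file=cloud_config.yml -n default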

@ -0,0 +1,37 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: disk-loss-sa
namespace: default
labels:
name: disk-loss-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: disk-loss-sa
labels:
name: disk-loss-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","litmuschaos.io","batch"]
resources: ["pods","jobs","secrets","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: disk-loss-sa
labels:
name: disk-loss-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: disk-loss-sa
subjects:
- kind: ServiceAccount
name: disk-loss-sa
namespace: default

@ -0,0 +1,46 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2020-07-14T10:28:08Z
name: docker-service-kill
version: 0.1.1
annotations:
categories: Kubernetes
vendor: CNCF
support: https://slack.kubernetes.io/
spec:
displayName: docker-service-kill
categoryDescription: |
docker-service-kill kills the docker service gracefully for a certain chaos duration.
- Causes replicas to be evicted or become unreachable as nodes turn unschedulable (Not Ready) while the docker service is down.
- The application node should be healthy once chaos is stopped and the services are accessible again.
keywords:
- Kubernetes
- Node
- Service
- Docker
platforms:
- GKE
- AKS
maturity: alpha
maintainers:
- name: Ankur Ghosh
email: ankur.ghosh3@wipro.com
minKubeVersion: 1.12.0
provider:
name: Wipro
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-ansible/tree/master/experiments/generic/docker_service_kill
- name: Documentation
url: https://docs.litmuschaos.io/docs/docker-service-kill/
- name: Video
url:
icon:
- base64data: ""
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/docker-service-kill/experiment.yaml

@ -0,0 +1,35 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: docker-service-kill-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: docker-service-kill
spec:
components:
nodeSelector:
# provide the node labels
kubernetes.io/hostname: 'node-01'
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '90'
# provide the actual name of node under test
- name: APP_NODE
value: 'node-01'
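The nodeSelector label value and APP_NODE must point at the same node; a sketch for listing candidate kubernetes.io/hostname values before filling them in:

kubectl get nodes -L kubernetes.io/hostname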

@ -0,0 +1,69 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kills the docker service on the application node to check the resiliency.
kind: ChaosExperiment
metadata:
name: docker-service-kill
labels:
name: docker-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "apps"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/log"
- "events"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "get"
- "list"
image: "litmuschaos/ansible-runner:ci"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/generic/docker_service_kill/docker_service_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: TOTAL_CHAOS_DURATION
value: '90' # in seconds
# Period to wait before injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: LIB
value: 'litmus'
labels:
name: docker-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0

@ -0,0 +1,40 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: docker-service-kill-sa
namespace: default
labels:
name: docker-service-kill-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: docker-service-kill-sa
labels:
name: docker-service-kill-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","litmuschaos.io","batch","apps"]
resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: docker-service-kill-sa
labels:
name: docker-service-kill-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: docker-service-kill-sa
subjects:
- kind: ServiceAccount
name: docker-service-kill-sa
namespace: default

File diff suppressed because it is too large.
@ -0,0 +1,69 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2019-09-26T10:28:08Z
name: generic
version: 0.1.16
annotations:
categories: Kubernetes
chartDescription: Injects generic kubernetes chaos
spec:
displayName: Generic Chaos
categoryDescription: >
Kubernetes is an open-source system for automating deployment, scaling, and management of containerized applications. It groups containers that make up an application into logical units for easier management and discovery. This chart installs all the experiments that can be used to inject chaos into containerized applications.
experiments:
- pod-delete
- container-kill
- pod-cpu-hog
- pod-network-loss
- pod-network-latency
- pod-network-corruption
- node-drain
- node-cpu-hog
- disk-loss
- disk-fill
- node-memory-hog
- pod-memory-hog
- kubelet-service-kill
- pod-network-duplication
- node-taint
- docker-service-kill
- pod-autoscaler
- k8-pod-delete
- k8-service-kill
- node-io-stress
- pod-io-stress
- node-restart
keywords:
- Kubernetes
- Container
- Node
- Pod
- Disk
- IO
- Filesystem
- Network
- CPU
- Memory
- Service
maintainers:
- name: ksatchit
email: karthik.s@mayadata.io
minKubeVersion: 1.12.0
provider:
name: Mayadata
links:
- name: Kubernetes Website
url: https://kubernetes.io
- name: Source Code
url: https://github.com/kubernetes/kubernetes
- name: Kubernetes Slack
url: https://slack.kubernetes.io/
- name: Documentation
url: https://docs.litmuschaos.io/docs/chaoshub/#generic-chaos
icon:
- url: https://raw.githubusercontent.com/litmuschaos/charthub.litmuschaos.io/master/public/litmus.ico
mediatype: image/png
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/experiments.yaml
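All experiments in this chart can be installed in one shot from the bundled CR file linked above (a sketch):

kubectl apply -f https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/experiments.yaml -n default
kubectl get chaosexperiments -n default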

@ -1,15 +1,14 @@
---
packageName: kubernetes
faults:
packageName: generic
experiments:
- name: pod-delete
CSV: pod-delete.chartserviceversion.yaml
desc: "pod-delete"
- name: container-kill
CSV: container-kill.chartserviceversion.yaml
CSV: container-kill.chartserviceversion.yaml
desc: "container-kill"
- name: pod-network-loss
CSV: pod-network-loss.chartserviceversion.yaml
desc: "pod-network-loss"
desc: "Pod-network-loss"
- name: pod-network-latency
CSV: pod-network-latency.chartserviceversion.yaml
desc: "pod-network-latency"
@ -22,6 +21,9 @@ faults:
- name: disk-fill
CSV: disk-fill.chartserviceversion.yaml
desc: "disk-fill"
- name: disk-loss
CSV: disk-loss.chartserviceversion.yaml
desc: "disk-loss"
- name: node-drain
CSV: node-drain.chartserviceversion.yaml
desc: "node-drain"
@ -31,21 +33,15 @@ faults:
- name: node-memory-hog
CSV: node-memory-hog.chartserviceversion.yaml
desc: "node-memory-hog"
- name: node-poweroff
CSV: node-poweroff.chartserviceversion.yaml
desc: "node-poweroff"
- name: node-restart
CSV: node-restart.chartserviceversion.yaml
desc: "node-restart"
- name: pod-memory-hog
CSV: pod-memory-hog.chartserviceversion.yaml
desc: "pod-memory-hog"
desc: "pod-memory-hog"
- name: kubelet-service-kill
CSV: kubelet-service-kill.chartserviceversion.yaml
desc: "kubelet-service-kill"
- name: pod-network-duplication
CSV: pod-network-duplication.chartserviceversion.yaml
desc: "pod-network-duplication"
desc: "pod-network-duplication"
- name: node-taint
CSV: node-taint.chartserviceversion.yaml
desc: "node-taint"
@ -55,48 +51,18 @@ faults:
- name: pod-autoscaler
CSV: pod-autoscaler.chartserviceversion.yaml
desc: "pod-autoscaler"
- name: k8-pod-delete
CSV: k8-pod-delete.chartserviceversion.yaml
desc: "k8-pod-delete"
- name: k8-service-kill
CSV: k8-service-kill.chartserviceversion.yaml
desc: "k8-service-kill"
- name: node-io-stress
CSV: node-io-stress.chartserviceversion.yaml
desc: "node-io-stress"
- name: pod-io-stress
CSV: pod-io-stress.chartserviceversion.yaml
desc: "pod-io-stress"
- name: pod-dns-error
CSV: pod-dns-error.chartserviceversion.yaml
desc: "pod-dns-error"
- name: pod-dns-spoof
CSV: pod-dns-spoof.chartserviceversion.yaml
desc: "pod-dns-spoof"
- name: pod-cpu-hog-exec
CSV: pod-cpu-hog-exec.chartserviceversion.yaml
desc: "pod-cpu-hog-exec"
- name: pod-memory-hog-exec
CSV: pod-memory-hog-exec.chartserviceversion.yaml
desc: "pod-memory-hog-exec"
- name: pod-network-partition
CSV: pod-network-partition.chartserviceversion.yaml
desc: "pod-network-partition"
- name: pod-http-latency
CSV: pod-http-latency.chartserviceversion.yaml
desc: "pod-http-latency"
- name: pod-http-status-code
CSV: pod-http-status-code.chartserviceversion.yaml
desc: "pod-http-status-code"
- name: pod-http-modify-header
CSV: pod-http-modify-header.chartserviceversion.yaml
desc: "pod-http-modify-header"
- name: pod-http-modify-body
CSV: pod-http-modify-body.chartserviceversion.yaml
desc: "pod-http-modify-body"
- name: pod-http-reset-peer
CSV: pod-http-reset-peer.chartserviceversion.yaml
desc: "pod-http-reset-peer"
- name: node-network-latency
CSV: node-network-latency.chartserviceversion.yaml
desc: "node-network-latency"
- name: node-network-loss
CSV: node-network-loss.chartserviceversion.yaml
desc: "node-network-loss"
- name: pod-network-rate-limit
CSV: pod-network-rate-limit.chartserviceversion.yaml
desc: "pod-network-rate-limit"
desc: "pod-io-stress"
- name: node-restart
CSV: node-restart.chartserviceversion.yaml
desc: "node-restart"

(Binary image file diffs not shown: several small images changed, one newly added.)
@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-all-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-all.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-count
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-count.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-health.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-custom-all-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-all.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-custom-count
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-count.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-cluster-custom-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: chaos-admin
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-health.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,46 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","apps","batch"]
resources: ["jobs","deployments","daemonsets"]
verbs: ["create","list","get","patch","delete"]
- apiGroups: ["","litmuschaos.io"]
resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"]
verbs: ["get","create","update","patch","delete","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs : ["get","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: k8-pod-delete-sa
subjects:
- kind: ServiceAccount
name: k8-pod-delete-sa
namespace: default

@ -0,0 +1,20 @@
# In Namespace Changes
* Apply the experiments for k8 - `kubectl apply -f experiments.yaml`
* Validate the experiments for k8 - `kubectl get chaosexperiments`
* Set up RBAC for pod delete - `kubectl apply -f rbac.yaml`
* Create the pod experiment - for the health experiment - `kubectl create -f engine.yaml`
* Validate the experiment - `kubectl get pods -w`
* Validate the logs - `kubectl logs -f <experiment pod>`
* Clean up the chaos engine - `kubectl delete -f engine.yaml`
* Clean up the RBAC - `kubectl delete -f rbac.yaml`
# Remote namespace
* Apply the experiments for k8 - `kubectl apply -f experiments.yaml`
* Validate the experiments for k8 - `kubectl get chaosexperiments`
* Set up RBAC in admin mode - `kubectl apply -f rbac-admin.yaml`
* Create the pod experiment - for the health experiment - `kubectl create -f engine-kiam.yaml`
* Validate the experiment - `kubectl get pods -w`
* Validate the logs - `kubectl logs -f <experiment pod>`
* Clean up the chaos engine - `kubectl delete -f engine-kiam.yaml`
* Clean up the RBAC - `kubectl delete -f rbac-admin.yaml`
* Check the verdict - see the ChaosResult sketch below
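Reading the verdict once a run completes (a sketch; litmus conventionally names the ChaosResult <engine-name>-<experiment-name>, e.g. nginx-chaos-app-health-k8-pod-delete for the health engine in this chart):

kubectl get chaosresults -n default
kubectl describe chaosresult nginx-chaos-app-health-k8-pod-delete -n default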

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-app-all-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-all.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-app-count
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-count.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-app-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-health.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-custom-all-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-all.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-custom-count
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-count.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,39 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-custom-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'app=nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-custom-kill-health.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,46 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","apps","batch"]
resources: ["jobs","deployments","daemonsets"]
verbs: ["create","list","get","patch","delete"]
- apiGroups: ["","litmuschaos.io"]
resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"]
verbs: ["get","create","update","patch","delete","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs : ["get","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: k8-pod-delete-sa
subjects:
- kind: ServiceAccount
name: k8-pod-delete-sa
namespace: default

@ -0,0 +1,38 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos-app-health
namespace: default
spec:
appinfo:
appns: 'default'
applabel: 'app=nginx'
appkind: 'deployment'
annotationCheck: 'true'
engineState: 'active'
chaosServiceAccount: k8-pod-delete-sa
monitoring: false
jobCleanUpPolicy: 'retain'
experiments:
- name: k8-pod-delete
spec:
components:
env:
# set chaos namespace
- name: NAME_SPACE
value: 'default'
# set chaos label name
- name: LABEL_NAME
value: 'nginx'
# pod endpoint
- name: APP_ENDPOINT
value: 'localhost'
- name: FILE
value: 'pod-app-kill-health.json'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'

@ -0,0 +1,83 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: k8-pod-delete
labels:
name: k8-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: 1.13.0
spec:
definition:
scope: Namespaced
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "deployments"
- "jobs"
- "pods"
- "configmaps"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- apiGroups:
- ""
resources:
- "nodes"
verbs :
- "get"
- "list"
image: "litmuschaos/chaostoolkit:latest"
args:
- -c
- python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0
command:
- /bin/bash
env:
- name: CHAOSTOOLKIT_IN_POD
value: 'true'
- name: FILE
value: 'pod-app-kill-count.json'
- name: NAME_SPACE
value: ''
- name: LABEL_NAME
value: ''
- name: APP_ENDPOINT
value: ''
- name: PERCENTAGE
value: '50'
- name: REPORT
value: 'true'
- name: REPORT_ENDPOINT
value: 'none'
- name: TEST_NAMESPACE
value: 'default'
labels:
name: k8-pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: 1.13.0
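The FILE env selects the chaostoolkit scenario json and PERCENTAGE scopes how many matching pods are affected; a ChaosEngine fragment overriding both (a sketch, values illustrative, scenario names as used by the engines in this chart):

  experiments:
    - name: k8-pod-delete
      spec:
        components:
          env:
            # kill every matching pod instead of the default 50%
            - name: FILE
              value: 'pod-app-kill-all.json'
            - name: PERCENTAGE
              value: '100'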

@ -0,0 +1,36 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: k8-pod-delete
version: 0.0.4
annotations:
categories: Kubernetes
vendor: CNCF
createdAt: 2020-02-24T10:28:08Z
support: https://slack.kubernetes.io/
spec:
displayName: k8-pod-delete
categoryDescription: |
K8 Pod delete contains chaos to disrupt the state of Kubernetes resources. It uses chaostoolkit to inject random pod-delete failures against the specified applications.
keywords:
- Kubernetes
- State
platforms:
- Minikube
maturity: alpha
maintainers:
- name: sumit
email: sumit_nagal@intuit.com
minKubeVersion: 1.12.0
provider:
name: Intuit
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: 1.13.0
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-python/tree/master/chaos-test
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/k8-pod-delete/experiment.yaml

@ -0,0 +1,46 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: ["","apps","batch"]
resources: ["jobs","deployments","daemonsets"]
verbs: ["create","list","get","patch","delete"]
- apiGroups: ["","litmuschaos.io"]
resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"]
verbs: ["get","create","update","patch","delete","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs : ["get","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: k8-pod-delete-sa
namespace: default
labels:
name: k8-pod-delete-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: k8-pod-delete-sa
subjects:
- kind: ServiceAccount
name: k8-pod-delete-sa
namespace: default

Some files were not shown because too many files have changed in this diff.