Compare commits

2 Commits

Author | SHA1 | Date |
---|---|---|
 | bc2a0248c0 | |
 | 42f9ccc802 | |
@@ -0,0 +1,86 @@
+---
+name: Build
+on:
+  pull_request:
+    branches: [master]
+    types: [opened, synchronize, reopened]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      #TODO: Add Dockerfile linting
+      # Running go-lint
+      - name: Checking Go-Lint
+        run : |
+          sudo apt-get update && sudo apt-get install golint
+          cd go/src/github.com/${{github.repository}}
+          make gotasks
+
+  build:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+        with:
+          platforms: all
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Build Docker Image
+        env:
+          DOCKER_REPO: litmuschaos
+          DOCKER_IMAGE: go-runner
+          DOCKER_TAG: ci
+        run: |
+          cd go/src/github.com/${{github.repository}}
+          make build
+
+  trivy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: setup trivy
+        run: |
+          wget https://github.com/aquasecurity/trivy/releases/download/v0.11.0/trivy_0.11.0_Linux-64bit.tar.gz
+          tar zxvf trivy_0.11.0_Linux-64bit.tar.gz
+          make trivy-check
@@ -0,0 +1,99 @@
+---
+name: Push
+on:
+  push:
+    branches:
+      - master
+    tags-ignore:
+      - '**'
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      #TODO: Add Dockerfile linting
+      # Running go-lint
+      - name: Checking Go-Lint
+        run : |
+          sudo apt-get update && sudo apt-get install golint
+          cd go/src/github.com/${{github.repository}}
+          make gotasks
+
+  push:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+        with:
+          platforms: all
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Build Docker Image
+        env:
+          DOCKER_REPO: litmuschaos
+          DOCKER_IMAGE: go-runner
+          DOCKER_TAG: ci
+        run: |
+          cd go/src/github.com/${{github.repository}}
+          make experiment-build
+
+      - name: Push Docker Image
+        env:
+          DOCKER_REPO: litmuschaos
+          DOCKER_IMAGE: go-runner
+          DOCKER_TAG: ci
+          DNAME: ${{ secrets.DNAME }}
+          DPASS: ${{ secrets.DPASS }}
+        run: |
+          cd go/src/github.com/${{github.repository}}
+          make push
+
+  trivy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: setup trivy
+        run: |
+          wget https://github.com/aquasecurity/trivy/releases/download/v0.11.0/trivy_0.11.0_Linux-64bit.tar.gz
+          tar zxvf trivy_0.11.0_Linux-64bit.tar.gz
+          make trivy-check
@@ -0,0 +1,110 @@
+---
+name: Release
+on:
+  create:
+    tags:
+      - 'v*'
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      #TODO: Add Dockerfile linting
+      # Running go-lint
+      - name: Checking Go-Lint
+        run : |
+          sudo apt-get update && sudo apt-get install golint
+          cd go/src/github.com/${{github.repository}}
+          make gotasks
+
+  push:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install golang
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.13.1'
+
+      # Setup gopath
+      - name: Setting up GOPATH
+        run: |
+          echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
+
+      # Checkout to the latest commit
+      # On specific directory/path
+      - uses: actions/checkout@v2
+        with:
+          path: go/src/github.com/${{github.repository}}
+
+      - name: Set Tag
+        run: |
+          TAG="${GITHUB_REF#refs/*/v}"
+          echo "TAG=${TAG}" >> $GITHUB_ENV
+          echo "RELEASE_TAG=${TAG}" >> $GITHUB_ENV
+
+      - name: Print Tag info
+        run: |
+          echo "RELEASE TAG: ${RELEASE_TAG}"
+          echo "${RELEASE_TAG}" > ${{ github.workspace }}/tag.txt
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+        with:
+          platforms: all
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Build Docker Image
+        env:
+          DOCKER_REPO: litmuschaos
+          DOCKER_IMAGE: go-runner
+          DOCKER_TAG: ${RELEASE_TAG}
+          DNAME: ${{ secrets.DNAME }}
+          DPASS: ${{ secrets.DPASS }}
+        run: |
+          cd go/src/github.com/${{github.repository}}
+          make experiment-build
+
+      - name: Push Docker Image
+        env:
+          DOCKER_REPO: litmuschaos
+          DOCKER_IMAGE: go-runner
+          DOCKER_TAG: ${RELEASE_TAG}
+          DNAME: ${{ secrets.DNAME }}
+          DPASS: ${{ secrets.DPASS }}
+        run: |
+          cd go/src/github.com/${{github.repository}}
+          make push
+
+  trivy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: setup trivy
+        run: |
+          wget https://github.com/aquasecurity/trivy/releases/download/v0.11.0/trivy_0.11.0_Linux-64bit.tar.gz
+          tar zxvf trivy_0.11.0_Linux-64bit.tar.gz
+          make trivy-check
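A note on the Set Tag step above: `TAG="${GITHUB_REF#refs/*/v}"` uses shell prefix-stripping, so pushing the tag `v1.8.0` (ref `refs/tags/v1.8.0`) yields `1.8.0`, which then feeds `RELEASE_TAG`. A minimal Go sketch of the same trimming, assuming the common `refs/tags/v*` form (the values are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// GITHUB_REF as delivered for a tag push — illustrative value.
	githubRef := "refs/tags/v1.8.0"
	// Equivalent of TAG="${GITHUB_REF#refs/*/v}" for refs under refs/tags/.
	tag := strings.TrimPrefix(githubRef, "refs/tags/v")
	fmt.Println(tag) // 1.8.0
}
```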
@@ -63,7 +63,7 @@ jobs:
         ACTIONS_ALLOW_UNSECURE_COMMANDS: true

       - name: Setup Litmus
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           INSTALL_LITMUS: true

@@ -80,7 +80,7 @@ jobs:

       - name: Running Litmus pod delete chaos experiment
         if: "contains(github.event.head_commit.message, '[Pod Delete]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-delete
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -90,7 +90,7 @@ jobs:

       - name: Running container kill chaos experiment
         if: "contains(github.event.head_commit.message, '[Container Kill]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: container-kill
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -101,7 +101,7 @@ jobs:

       - name: Running node-cpu-hog chaos experiment
         if: "contains(github.event.head_commit.message, '[Resource Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -112,7 +112,7 @@ jobs:

       - name: Running node-memory-hog chaos experiment
         if: "contains(github.event.head_commit.message, '[Resource Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-memory-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -122,7 +122,7 @@ jobs:

       - name: Running pod-cpu-hog chaos experiment
         if: "contains(github.event.head_commit.message, '[Resource Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -135,7 +135,7 @@ jobs:

       - name: Running pod-memory-hog chaos experiment
         if: "contains(github.event.head_commit.message, '[Resource Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -148,7 +148,7 @@ jobs:

       - name: Running pod network corruption chaos experiment
         if: "contains(github.event.head_commit.message, '[Network Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-corruption
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -162,7 +162,7 @@ jobs:

       - name: Running pod network duplication chaos experiment
         if: "contains(github.event.head_commit.message, '[Network Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-duplication
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -176,7 +176,7 @@ jobs:

       - name: Running pod-network-latency chaos experiment
         if: "contains(github.event.head_commit.message, '[Network Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-latency
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -191,7 +191,7 @@ jobs:

       - name: Running pod-network-loss chaos experiment
         if: "contains(github.event.head_commit.message, '[Network Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-loss
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -206,7 +206,7 @@ jobs:

       - name: Running pod autoscaler chaos experiment
         if: "contains(github.event.head_commit.message, '[Scale Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-autoscaler
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -217,7 +217,7 @@ jobs:

       - name: Running node-io-stress chaos experiment
         if: "contains(github.event.head_commit.message, '[IO Chaos]') || contains(github.event.head_commit.message, '[Run CI]') || env.TEST_RUN != 'true'"
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-io-stress
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -236,7 +236,7 @@ jobs:
         run: echo "Some tests are failing please check..."

       - name: Uninstall Litmus
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           LITMUS_CLEANUP: true

@@ -72,13 +72,13 @@ jobs:
         ACTIONS_ALLOW_UNSECURE_COMMANDS: true

       - name: Setup Litmus
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           INSTALL_LITMUS: true

       - name: Running Litmus pod delete chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-delete') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-delete
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -96,7 +96,7 @@ jobs:

       - name: Running container kill chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-container-kill') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: container-kill
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -115,7 +115,7 @@ jobs:

       - name: Running node-cpu-hog chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-node-cpu-hog') || startsWith(github.event.comment.body, '/run-e2e-resource-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -133,7 +133,7 @@ jobs:

       - name: Running node-memory-hog chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-node-memory-hog') || startsWith(github.event.comment.body, '/run-e2e-resource-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-memory-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -151,7 +151,7 @@ jobs:

       - name: Running pod-cpu-hog chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-cpu-hog') || startsWith(github.event.comment.body, '/run-e2e-resource-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -172,7 +172,7 @@ jobs:

       - name: Running pod-memory-hog chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-memory-hog') || startsWith(github.event.comment.body, '/run-e2e-resource-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-cpu-hog
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -193,7 +193,7 @@ jobs:

       - name: Running pod network corruption chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-network-corruption') || startsWith(github.event.comment.body, '/run-e2e-network-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-corruption
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -215,7 +215,7 @@ jobs:

       - name: Running pod network duplication chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-network-duplication') || startsWith(github.event.comment.body, '/run-e2e-network-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-duplication
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -237,7 +237,7 @@ jobs:

       - name: Running pod-network-latency chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-network-latency') || startsWith(github.event.comment.body, '/run-e2e-network-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-latency
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -260,7 +260,7 @@ jobs:

       - name: Running pod-network-loss chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-network-loss') || startsWith(github.event.comment.body, '/run-e2e-network-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-network-loss
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -283,7 +283,7 @@ jobs:

       - name: Running pod autoscaler chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-pod-autoscaler') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: pod-autoscaler
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -302,7 +302,7 @@ jobs:

       - name: Running node-io-stress chaos experiment
         if: startsWith(github.event.comment.body, '/run-e2e-node-io-stress') || startsWith(github.event.comment.body, '/run-e2e-io-chaos') || startsWith(github.event.comment.body, '/run-e2e-all')
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           EXPERIMENT_NAME: node-io-stress
           EXPERIMENT_IMAGE: litmuschaos/go-runner
@@ -362,7 +362,7 @@ jobs:
         RUN_ID: ${{ github.run_id }}

       - name: Uninstall Litmus
-        uses: mayadata-io/github-chaos-actions@v0.3.0
+        uses: mayadata-io/github-chaos-actions@v0.3.1
         env:
           LITMUS_CLEANUP: true

.travis.yml (47 lines deleted)
@@ -1,47 +0,0 @@
-
-sudo: required
-os: linux
-dist: bionic
-
-services:
-  - docker
-language: go
-go:
-  - 1.14.2
-
-addons:
-  apt:
-    update: true
-
-before_script:
-  - sudo apt-get update && sudo apt-get install golint
-  - sudo apt-get install -y rpm
-  - wget https://github.com/aquasecurity/trivy/releases/download/v0.11.0/trivy_0.11.0_Linux-64bit.tar.gz
-  - tar zxvf trivy_0.11.0_Linux-64bit.tar.gz
-  - set -e
-  # Configure environment so changes are picked up when the Docker daemon is restarted after upgrading
-  - echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json
-  - export DOCKER_CLI_EXPERIMENTAL=enabled
-  - docker run --rm --privileged docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64
-  # Upgrade to Docker CE 19.03 for BuildKit support
-  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
-  - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
-  - sudo apt-get update
-  - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce=5:19.03.8~3-0~ubuntu-bionic # pin version for reproducibility
-  # Show info to simplify debugging and create a builder
-  - docker info
-  - docker buildx create --name builder --use
-  - docker buildx ls
-
-script:
-  # Installing and configuring dependencies
-  - make deps
-  # Includes formatting, linting and check unused packages
-  - make gotasks
-  # Build
-  - make build
-  # Running trivy check
-  - make trivy-check
-
-after_success:
-  - make push
Makefile (25 changes)
@@ -7,6 +7,11 @@
 #
 IS_DOCKER_INSTALLED = $(shell which docker >> /dev/null 2>&1; echo $$?)

+# Docker info
+DOCKER_REPO ?= litmuschaos
+DOCKER_IMAGE ?= go-runner
+DOCKER_TAG ?= ci
+
 PACKAGES = $(shell go list ./... | grep -v '/vendor/')

 .PHONY: all
@@ -66,16 +71,21 @@ unused-package-check:
 	fi

 .PHONY: build
-build:
+build: experiment-build image-build
+
+.PHONY: experiment-build
+experiment-build:
 	@echo "------------------------------"
 	@echo "--> Build experiment go binary"
 	@echo "------------------------------"
 	@./build/go-multiarch-build.sh build/generate_go_binary
+
+.PHONY: image-build
+image-build:
 	@echo "-------------------------"
 	@echo "--> Build go-runner image"
 	@echo "-------------------------"
-	@sudo docker buildx build --file build/litmus-go/Dockerfile --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag litmuschaos/go-runner:ci .
+	@sudo docker buildx build --file build/litmus-go/Dockerfile --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) .

 .PHONY: build-amd64
 build-amd64:
@@ -87,7 +97,7 @@ build-amd64:
 	@echo "-------------------------"
 	@echo "--> Build go-runner image"
 	@echo "-------------------------"
-	@sudo docker build --file build/litmus-go/Dockerfile --tag litmuschaos/go-runner:ci . --build-arg TARGETARCH=amd64
+	@sudo docker build --file build/litmus-go/Dockerfile --tag $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETARCH=amd64

 .PHONY: push-amd64
 push-amd64:
@@ -95,7 +105,7 @@ push-amd64:
 	@echo "------------------------------"
 	@echo "--> Pushing image"
 	@echo "------------------------------"
-	@sudo docker push litmuschaos/go-runner:ci
+	@sudo docker push $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG)

 .PHONY: push
 push: litmus-go-push
@@ -104,7 +114,7 @@ litmus-go-push:
 	@echo "-------------------"
 	@echo "--> go-runner image"
 	@echo "-------------------"
-	REPONAME="litmuschaos" IMGNAME="go-runner" IMGTAG="ci" ./build/push
+	REPONAME="$(DOCKER_REPO)" IMGNAME="$(DOCKER_IMAGE)" IMGTAG="$(DOCKER_TAG)" ./build/push

 .PHONY: trivy-check
 trivy-check:
@@ -112,6 +122,5 @@ trivy-check:
 	@echo "------------------------"
 	@echo "---> Running Trivy Check"
 	@echo "------------------------"
-	@./trivy --exit-code 0 --severity HIGH --no-progress litmuschaos/go-runner:ci
-	@./trivy --exit-code 0 --severity CRITICAL --no-progress litmuschaos/go-runner:ci
-
+	@./trivy --exit-code 0 --severity HIGH --no-progress $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG)
+	@./trivy --exit-code 0 --severity CRITICAL --no-progress $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG)
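A note on the Makefile change above: because the image coordinates are declared with `?=`, they are only defaulted when not already set. The `env:` blocks in the workflows above (`DOCKER_REPO`, `DOCKER_IMAGE`, `DOCKER_TAG`) therefore select what `build`, `push`, and `trivy-check` operate on, and an ad-hoc override such as `make build DOCKER_TAG=latest` (an illustrative value) works the same way, replacing the previously hard-coded `litmuschaos/go-runner:ci` reference.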
@@ -20,13 +20,13 @@ then
 	# Push image to docker hub
 	echo "Pushing ${REPONAME}/${IMGNAME}:${IMGTAG} ...";
 	sudo docker buildx build --file build/litmus-go/Dockerfile --push --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag ${REPONAME}/${IMGNAME}:${IMGTAG} .
-	if [ ! -z "${TRAVIS_TAG}" ] ;
+	if [ ! -z "${RELEASE_TAG}" ] ;
 	then
 		# Push with different tags if tagged as a release
 		# When github is tagged with a release, then Travis will
-		# set the release tag in env TRAVIS_TAG
-		echo "Pushing ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} ...";
-		sudo docker buildx build --file build/litmus-go/Dockerfile --push --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} .
+		# set the release tag in env RELEASE_TAG
+		echo "Pushing ${REPONAME}/${IMGNAME}:${RELEASE_TAG} ...";
+		sudo docker buildx build --file build/litmus-go/Dockerfile --push --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag ${REPONAME}/${IMGNAME}:${RELEASE_TAG} .
 		echo "Pushing ${REPONAME}/${IMGNAME}:latest ...";
 		sudo docker buildx build --file build/litmus-go/Dockerfile --push --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag ${REPONAME}/${IMGNAME}:latest .
 	fi;
@@ -292,7 +292,7 @@ func parsePIDFromJSON(j []byte, runtime string) (int, error) {
 		return 0, errors.Errorf("[cri]: No supported container runtime, runtime: %v", runtime)
 	}
 	if pid == 0 {
-		return 0, errors.Errorf("[cri]: No running target container found, pid: %v", string(pid))
+		return 0, errors.Errorf("[cri]: No running target container found, pid: %d", pid)
 	}

 	return pid, nil
@@ -69,7 +69,7 @@ func parsePIDFromJSON(j []byte, runtime string) (int, error) {
 	}

 	if pid == 0 {
-		return 0, errors.Errorf("[cri] no running target container found, pid: %v", string(pid))
+		return 0, errors.Errorf("[cri] no running target container found, pid: %d", pid)
 	}

 	return pid, nil
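The fix in these two hunks is worth spelling out: in Go, converting an integer to `string` does not render its decimal digits — it produces the UTF-8 encoding of that code point — so the old `%v` + `string(pid)` logged garbage. A standalone sketch (not the repository's code):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	pid := 65
	// string(rune(pid)) yields the code point U+0041, i.e. "A" — not "65".
	fmt.Println(string(rune(pid))) // A
	// The fixed error message formats the integer itself:
	fmt.Printf("pid: %d\n", pid) // pid: 65
	// strconv.Itoa is the explicit conversion when a string value is needed.
	fmt.Println(strconv.Itoa(pid)) // 65
}
```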
@@ -6,6 +6,7 @@ import (
 	"os"
 	"os/exec"
 	"os/signal"
+	"strconv"
 	"syscall"
 	"time"

@@ -137,7 +138,7 @@ func DrainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl

 	log.Infof("[Inject]: Draining the %v node", experimentsDetails.TargetNode)

-	command := exec.Command("kubectl", "drain", experimentsDetails.TargetNode, "--ignore-daemonsets", "--delete-local-data", "--force")
+	command := exec.Command("kubectl", "drain", experimentsDetails.TargetNode, "--ignore-daemonsets", "--delete-local-data", "--force", "--timeout", strconv.Itoa(experimentsDetails.ChaosDuration)+"s")
 	var out, stderr bytes.Buffer
 	command.Stdout = &out
 	command.Stderr = &stderr
@ -146,7 +147,7 @@ func DrainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl
|
|||
return fmt.Errorf("Unable to drain the %v node, err: %v", experimentsDetails.TargetNode, err)
|
||||
}
|
||||
|
||||
err = retry.
|
||||
return retry.
|
||||
Times(90).
|
||||
Wait(1 * time.Second).
|
||||
Try(func(attempt uint) error {
|
||||
|
@@ -159,8 +160,6 @@ func DrainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients cl
 			}
 			return nil
 		})
-
-	return nil
 }

 // UncordonNode uncordon the application node
@@ -177,7 +176,7 @@ func UncordonNode(experimentsDetails *experimentTypes.ExperimentDetails, clients
 		return fmt.Errorf("Unable to uncordon the %v node, err: %v", experimentsDetails.TargetNode, err)
 	}

-	err = retry.
+	return retry.
 		Times(90).
 		Wait(1 * time.Second).
 		Try(func(attempt uint) error {
@@ -190,6 +189,4 @@ func UncordonNode(experimentsDetails *experimentTypes.ExperimentDetails, clients
 			}
 			return nil
 		})
-
-	return nil
 }
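The change from `err = retry.` to `return retry.` in `DrainNode` and `UncordonNode` is a real bug fix: previously the retry chain's error was assigned and then discarded by the unconditional `return nil` at the end of each function, so a node that never finished draining still reported success. A minimal sketch of the pattern, with a hypothetical `poll` helper standing in for the `retry.Times(...).Wait(...).Try(...)` chain:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// poll retries check a few times, mirroring the retry chain's behaviour.
func poll(check func() error) error {
	var err error
	for i := 0; i < 3; i++ {
		if err = check(); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return err
}

func drainOld(check func() error) error {
	err := poll(check) // error captured...
	_ = err
	return nil // ...but the caller always saw success
}

func drainNew(check func() error) error {
	return poll(check) // failure now propagates to the caller
}

func main() {
	failing := func() error { return errors.New("node still has pods") }
	fmt.Println(drainOld(failing)) // <nil>
	fmt.Println(drainNew(failing)) // node still has pods
}
```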
@@ -90,7 +90,7 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c

 	//Checking the status of helper pod
 	log.Info("[Status]: Checking the status of the helper pod")
-	err = status.CheckApplicationStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients)
+	err = CheckApplicationStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients)
 	if err != nil {
 		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
 		return errors.Errorf("helper pod is not in running state, err: %v", err)
@@ -223,3 +223,22 @@ func GetNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clie

 	return &podForNodeCandidate, nil
 }
+
+// CheckApplicationStatus checks the status of the AUT
+func CheckApplicationStatus(appNs, appLabel string, timeout, delay int, clients clients.ClientSets) error {
+
+	// Checking whether application containers are in ready state
+	log.Info("[Status]: Checking whether application containers are in ready state")
+	err := status.CheckContainerStatus(appNs, appLabel, timeout, delay, clients)
+	if err != nil {
+		return err
+	}
+	// Checking whether application pods are in running or completed state
+	log.Info("[Status]: Checking whether application pods are in running or completed state")
+	err = status.CheckPodStatusPhase(appNs, appLabel, timeout, delay, clients, "Running", "Completed")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
@@ -1,6 +1,7 @@
 package lib

 import (
+	"math"
 	"os"
 	"os/signal"
 	"strings"
@@ -11,6 +12,7 @@ import (
 	"github.com/litmuschaos/litmus-go/pkg/events"
 	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types"
 	"github.com/litmuschaos/litmus-go/pkg/log"
+	"github.com/litmuschaos/litmus-go/pkg/probe"
 	"github.com/litmuschaos/litmus-go/pkg/result"
 	"github.com/litmuschaos/litmus-go/pkg/types"
 	"github.com/litmuschaos/litmus-go/pkg/utils/common"
@@ -44,40 +46,40 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails,
 	switch strings.ToLower(experimentsDetails.AppKind) {
 	case "deployment", "deployments":

-		appName, replicaCount, err := GetDeploymentDetails(experimentsDetails, clients)
+		appsUnderTest, err := GetDeploymentDetails(experimentsDetails, clients)
 		if err != nil {
 			return errors.Errorf("Unable to get the name & replicaCount of the deployment, err: %v", err)
 		}

 		//calling go routine which will continuously watch for the abort signal
-		go AbortPodAutoScalerChaos(replicaCount, appName, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)
+		go AbortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)

-		err = PodAutoscalerChaosInDeployment(experimentsDetails, clients, replicaCount, appName, resultDetails, eventsDetails, chaosDetails)
+		err = PodAutoscalerChaosInDeployment(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
 		if err != nil {
 			return errors.Errorf("Unable to perform autoscaling, err: %v", err)
 		}

-		err = AutoscalerRecoveryInDeployment(experimentsDetails, clients, replicaCount, appName)
+		err = AutoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest)
 		if err != nil {
 			return errors.Errorf("Unable to rollback the autoscaling, err: %v", err)
 		}

 	case "statefulset", "statefulsets":

-		appName, replicaCount, err := GetStatefulsetDetails(experimentsDetails, clients)
+		appsUnderTest, err := GetStatefulsetDetails(experimentsDetails, clients)
 		if err != nil {
 			return errors.Errorf("Unable to get the name & replicaCount of the statefulset, err: %v", err)
 		}

 		//calling go routine which will continuously watch for the abort signal
-		go AbortPodAutoScalerChaos(replicaCount, appName, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)
+		go AbortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)

-		err = PodAutoscalerChaosInStatefulset(experimentsDetails, clients, replicaCount, appName, resultDetails, eventsDetails, chaosDetails)
+		err = PodAutoscalerChaosInStatefulset(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
 		if err != nil {
 			return errors.Errorf("Unable to perform autoscaling, err: %v", err)
 		}

-		err = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, replicaCount, appName)
+		err = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest)
 		if err != nil {
 			return errors.Errorf("Unable to rollback the autoscaling, err: %v", err)
 		}
@@ -94,64 +96,77 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails,
 	return nil
 }

-//GetDeploymentDetails is used to get the name and total number of replicas of the deployment
-func GetDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, int, error) {
+func getSliceOfTotalApplicationsTargeted(appList []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) {

-	var appReplica int
-	var appName string
+	slice := int(math.Round(float64(len(appList)*experimentsDetails.AppAffectPercentage) / float64(100)))
+	if slice < 0 || slice > len(appList) {
+		return nil, errors.Errorf("slice of applications to target out of range %d/%d", slice, len(appList))
+	}
+	return appList[:slice], nil
+}
+
+//GetDeploymentDetails is used to get the name and total number of replicas of the deployment
+func GetDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) ([]experimentTypes.ApplicationUnderTest, error) {

 	deploymentList, err := appsv1DeploymentClient.List(metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})
 	if err != nil || len(deploymentList.Items) == 0 {
-		return "", 0, errors.Errorf("Unable to find the deployments with matching labels, err: %v", err)
+		return nil, errors.Errorf("Unable to find the deployments with matching labels, err: %v", err)
 	}
+	appsUnderTest := []experimentTypes.ApplicationUnderTest{}
 	for _, app := range deploymentList.Items {
-		appReplica = int(*app.Spec.Replicas)
-		appName = app.Name
+		log.Infof("[DeploymentDetails]: Found deployment name %s with replica count %d", app.Name, int(*app.Spec.Replicas))
+		appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)})
 	}
-
-	return appName, appReplica, nil
+	// Applying the APP_AFFECT_PERC variable to determine the total target deployments to scale
+	return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails)
 }

 //GetStatefulsetDetails is used to get the name and total number of replicas of the statefulsets
-func GetStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, int, error) {
-
-	var appReplica int
-	var appName string
+func GetStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) ([]experimentTypes.ApplicationUnderTest, error) {

 	statefulsetList, err := appsv1StatefulsetClient.List(metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})
 	if err != nil || len(statefulsetList.Items) == 0 {
-		return "", 0, errors.Errorf("Unable to find the statefulsets with matching labels, err: %v", err)
-	}
-	for _, app := range statefulsetList.Items {
-		appReplica = int(*app.Spec.Replicas)
-		appName = app.Name
+		return nil, errors.Errorf("Unable to find the statefulsets with matching labels, err: %v", err)
 	}

-	return appName, appReplica, nil
+	appsUnderTest := []experimentTypes.ApplicationUnderTest{}
+	for _, app := range statefulsetList.Items {
+		log.Infof("[DeploymentDetails]: Found statefulset name %s with replica count %d", app.Name, int(*app.Spec.Replicas))
+		appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)})
+	}
+	// Applying the APP_AFFECT_PERC variable to determine the total target deployments to scale
+	return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails)
 }

 //PodAutoscalerChaosInDeployment scales up the replicas of deployment and verify the status
-func PodAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, replicaCount int, appName string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
+func PodAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

 	// Scale Application
 	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		// Retrieve the latest version of Deployment before attempting update
-		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-		appUnderTest, err := appsv1DeploymentClient.Get(appName, metav1.GetOptions{})
-		if err != nil {
-			return errors.Errorf("Failed to get latest version of Application Deployment, err: %v", err)
+		for _, app := range appsUnderTest {
+			// Retrieve the latest version of Deployment before attempting update
+			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
+			appUnderTest, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})
+			if err != nil {
+				return errors.Errorf("Failed to get latest version of Application Deployment, err: %v", err)
+			}
+			// modifying the replica count
+			appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
+			log.Infof("Updating deployment %s to number of replicas %d", appUnderTest.ObjectMeta.Name, experimentsDetails.Replicas)
+			_, err = appsv1DeploymentClient.Update(appUnderTest)
+			if err != nil {
+				return err
+			}
 		}
-		// modifying the replica count
-		appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
-		_, updateErr := appsv1DeploymentClient.Update(appUnderTest)
-		return updateErr
+		return nil
 	})
 	if retryErr != nil {
 		return errors.Errorf("Unable to scale the deployment, err: %v", retryErr)
 	}
 	log.Info("Application Started Scaling")

-	err = DeploymentStatusCheck(experimentsDetails, appName, clients, replicaCount, resultDetails, eventsDetails, chaosDetails)
+	err = DeploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
 	if err != nil {
 		return errors.Errorf("Status Check failed, err: %v", err)
 	}
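The new `getSliceOfTotalApplicationsTargeted` helper caps how many of the discovered deployments/statefulsets are scaled: it keeps the first `round(len(appList) * AppAffectPercentage / 100)` entries. A standalone sketch of just the arithmetic (example values are illustrative):

```go
package main

import (
	"fmt"
	"math"
)

// targets mirrors the slice computation above: how many of total
// applications are affected for a given APP_AFFECT_PERC value.
func targets(total, affectPercentage int) int {
	return int(math.Round(float64(total*affectPercentage) / float64(100)))
}

func main() {
	fmt.Println(targets(4, 50))  // 2
	fmt.Println(targets(3, 50))  // 2 (math.Round rounds half away from zero)
	fmt.Println(targets(5, 0))   // 0 — no applications are scaled
	fmt.Println(targets(5, 100)) // 5 — all of them
}
```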
@@ -160,27 +175,32 @@ func PodAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.Experime
 }

 //PodAutoscalerChaosInStatefulset scales up the replicas of statefulset and verify the status
-func PodAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, replicaCount int, appName string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
+func PodAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

 	// Scale Application
 	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		// Retrieve the latest version of Statefulset before attempting update
-		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-		appUnderTest, err := appsv1StatefulsetClient.Get(appName, metav1.GetOptions{})
-		if err != nil {
-			return errors.Errorf("Failed to get latest version of Application Statefulset, err: %v", err)
+		for _, app := range appsUnderTest {
+			// Retrieve the latest version of Statefulset before attempting update
+			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
+			appUnderTest, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})
+			if err != nil {
+				return errors.Errorf("Failed to get latest version of Application Statefulset, err: %v", err)
+			}
+			// modifying the replica count
+			appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
+			_, err = appsv1StatefulsetClient.Update(appUnderTest)
+			if err != nil {
+				return err
+			}
 		}
-		// modifying the replica count
-		appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
-		_, updateErr := appsv1StatefulsetClient.Update(appUnderTest)
-		return updateErr
+		return nil
 	})
 	if retryErr != nil {
 		return errors.Errorf("Unable to scale the statefulset, err: %v", retryErr)
 	}
 	log.Info("Application Started Scaling")

-	err = StatefulsetStatusCheck(experimentsDetails, appName, clients, replicaCount, resultDetails, eventsDetails, chaosDetails)
+	err = StatefulsetStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
 	if err != nil {
 		return errors.Errorf("Status Check failed, err: %v", err)
 	}
@@ -189,7 +209,7 @@ func PodAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.Experim
 }

 // DeploymentStatusCheck check the status of deployment and verify the available replicas
-func DeploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, appName string, clients clients.ClientSets, replicaCount int, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
+func DeploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

 	//Record start timestamp
 	ChaosStartTimeStamp := time.Now().Unix()
@@ -199,30 +219,38 @@ func DeploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails
 		Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).
 		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
 		Try(func(attempt uint) error {
-
-			deployment, err := appsv1DeploymentClient.Get(appName, metav1.GetOptions{})
-			if err != nil {
-				return errors.Errorf("Unable to find the deployment with name %v, err: %v", appName, err)
-			}
-			log.Infof("Deployment's Available Replica Count is %v", deployment.Status.AvailableReplicas)
-			if int(deployment.Status.AvailableReplicas) != experimentsDetails.Replicas {
-				isFailed = true
-				return errors.Errorf("Application is not scaled yet, err: %v", err)
+			for _, app := range appsUnderTest {
+				deployment, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})
+				if err != nil {
+					return errors.Errorf("Unable to find the deployment with name %v, err: %v", app.AppName, err)
+				}
+				log.Infof("Deployment's Available Replica Count is %v", deployment.Status.AvailableReplicas)
+				if int(deployment.Status.AvailableReplicas) != app.ReplicaCount {
+					isFailed = true
+					return errors.Errorf("Application %s is not scaled yet, err: %v", app.AppName, err)
+				}
 			}
 			isFailed = false
 			return nil
 		})

 	if isFailed {
-		err = AutoscalerRecoveryInDeployment(experimentsDetails, clients, replicaCount, appName)
+		err = AutoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest)
 		if err != nil {
 			return errors.Errorf("Unable to perform autoscaling, err: %v", err)
 		}
 		return errors.Errorf("Failed to scale the application")
-	} else if err != nil {
+	}
+	if err != nil {
 		return err
 	}

+	// run the probes during chaos
+	if len(resultDetails.ProbeDetails) != 0 {
+		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
+			return err
+		}
+	}
 	//ChaosCurrentTimeStamp contains the current timestamp
 	ChaosCurrentTimeStamp := time.Now().Unix()
 	if int(ChaosCurrentTimeStamp-ChaosStartTimeStamp) <= experimentsDetails.ChaosDuration {
@@ -234,7 +262,7 @@ func DeploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails
 }

 // StatefulsetStatusCheck check the status of statefulset and verify the available replicas
-func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, appName string, clients clients.ClientSets, replicaCount int, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
+func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

 	//Record start timestamp
 	ChaosStartTimeStamp := time.Now().Unix()
@@ -244,29 +272,39 @@ func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail
 		Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).
 		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
 		Try(func(attempt uint) error {
-			statefulset, err := appsv1StatefulsetClient.Get(appName, metav1.GetOptions{})
-			if err != nil {
-				return errors.Errorf("Unable to find the statefulset with name %v, err: %v", appName, err)
-			}
-			log.Infof("Statefulset's Ready Replica Count is: %v", statefulset.Status.ReadyReplicas)
-			if int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas {
-				isFailed = true
-				return errors.Errorf("Application is not scaled yet, err: %v", err)
+			for _, app := range appsUnderTest {
+				statefulset, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})
+				if err != nil {
+					return errors.Errorf("Unable to find the statefulset with name %v, err: %v", app.AppName, err)
+				}
+				log.Infof("Statefulset's Ready Replica Count is: %v", statefulset.Status.ReadyReplicas)
+				if int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas {
+					isFailed = true
+					return errors.Errorf("Application is not scaled yet, err: %v", err)
+				}
 			}
 			isFailed = false
 			return nil
 		})

 	if isFailed {
-		err = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, replicaCount, appName)
+		err = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest)
 		if err != nil {
 			return errors.Errorf("Unable to perform autoscaling, err: %v", err)
 		}
 		return errors.Errorf("Failed to scale the application")
-	} else if err != nil {
+	}
+	if err != nil {
 		return err
 	}

+	// run the probes during chaos
+	if len(resultDetails.ProbeDetails) != 0 {
+		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
+			return err
+		}
+	}
+
 	//ChaosCurrentTimeStamp contains the current timestamp
 	ChaosCurrentTimeStamp := time.Now().Unix()
 	if int(ChaosCurrentTimeStamp-ChaosStartTimeStamp) <= experimentsDetails.ChaosDuration {
@@ -278,20 +316,25 @@ func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail
 }

 //AutoscalerRecoveryInDeployment rollback the replicas to initial values in deployment
-func AutoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, replicaCount int, appName string) error {
+func AutoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest) error {

 	// Scale back to initial number of replicas
 	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
 		// Retrieve the latest version of Deployment before attempting update
 		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-		appUnderTest, err := appsv1DeploymentClient.Get(appName, metav1.GetOptions{})
-		if err != nil {
-			return errors.Errorf("Failed to find the latest version of Application Deployment with name %v, err: %v", appName, err)
-		}
+		for _, app := range appsUnderTest {
+			appUnderTest, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})
+			if err != nil {
+				return errors.Errorf("Failed to find the latest version of Application Deployment with name %v, err: %v", app.AppName, err)
+			}

-		appUnderTest.Spec.Replicas = int32Ptr(int32(replicaCount)) // modify replica count
-		_, updateErr := appsv1DeploymentClient.Update(appUnderTest)
-		return updateErr
+			appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count
+			_, err = appsv1DeploymentClient.Update(appUnderTest)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
 	})
 	if retryErr != nil {
 		return errors.Errorf("Unable to rollback the deployment, err: %v", retryErr)
@@ -302,13 +345,15 @@ func AutoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.Experime
 		Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
 		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
 		Try(func(attempt uint) error {
-			applicationDeploy, err := appsv1DeploymentClient.Get(appName, metav1.GetOptions{})
-			if err != nil {
-				return errors.Errorf("Unable to find the deployment with name %v, err: %v", appName, err)
-			}
-			if int(applicationDeploy.Status.AvailableReplicas) != replicaCount {
-				log.Infof("Application Available Replica Count is: %v", applicationDeploy.Status.AvailableReplicas)
-				return errors.Errorf("Unable to rollback to older replica count, err: %v", err)
+			for _, app := range appsUnderTest {
+				applicationDeploy, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})
+				if err != nil {
+					return errors.Errorf("Unable to find the deployment with name %v, err: %v", app.AppName, err)
+				}
+				if int(applicationDeploy.Status.AvailableReplicas) != app.ReplicaCount {
+					log.Infof("Application Available Replica Count is: %v", applicationDeploy.Status.AvailableReplicas)
+					return errors.Errorf("Unable to rollback to older replica count, err: %v", err)
+				}
 			}
 			return nil
 		})
@@ -322,20 +367,25 @@ func AutoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.Experime
 }

 //AutoscalerRecoveryInStatefulset rollback the replicas to initial values in deployment
-func AutoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, replicaCount int, appName string) error {
+func AutoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest) error {

 	// Scale back to initial number of replicas
 	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		// Retrieve the latest version of Statefulset before attempting update
-		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-		appUnderTest, err := appsv1StatefulsetClient.Get(appName, metav1.GetOptions{})
-		if err != nil {
-			return errors.Errorf("Failed to find the latest version of Statefulset with name %v, err: %v", appName, err)
-		}
+		for _, app := range appsUnderTest {
+			// Retrieve the latest version of Statefulset before attempting update
+			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
+			appUnderTest, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})
+			if err != nil {
+				return errors.Errorf("Failed to find the latest version of Statefulset with name %v, err: %v", app.AppName, err)
+			}

-		appUnderTest.Spec.Replicas = int32Ptr(int32(replicaCount)) // modify replica count
-		_, updateErr := appsv1StatefulsetClient.Update(appUnderTest)
-		return updateErr
+			appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count
+			_, err = appsv1StatefulsetClient.Update(appUnderTest)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
 	})
 	if retryErr != nil {
 		return errors.Errorf("Unable to rollback the statefulset, err: %v", retryErr)
@@ -346,13 +396,16 @@ func AutoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.Experim
 		Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
 		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
 		Try(func(attempt uint) error {
-			applicationDeploy, err := appsv1StatefulsetClient.Get(appName, metav1.GetOptions{})
-			if err != nil {
-				return errors.Errorf("Unable to find the statefulset with name %v, err: %v", appName, err)
-			}
-			if int(applicationDeploy.Status.ReadyReplicas) != replicaCount {
-				log.Infof("Application Ready Replica Count is: %v", applicationDeploy.Status.ReadyReplicas)
-				return errors.Errorf("Unable to roll back to older replica count, err: %v", err)
+			for _, app := range appsUnderTest {
+				applicationDeploy, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})
+
+				if err != nil {
+					return errors.Errorf("Unable to find the statefulset with name %v, err: %v", app.AppName, err)
+				}
+				if int(applicationDeploy.Status.ReadyReplicas) != app.ReplicaCount {
+					log.Infof("Application Ready Replica Count is: %v", applicationDeploy.Status.ReadyReplicas)
+					return errors.Errorf("Unable to roll back to older replica count, err: %v", err)
+				}
 			}
 			return nil
 		})
@@ -368,7 +421,7 @@ func AutoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.Experim
 func int32Ptr(i int32) *int32 { return &i }

 //AbortPodAutoScalerChaos go routine will continuously watch for the abort signal for the entire chaos duration and generate the required events and result
-func AbortPodAutoScalerChaos(replicaCount int, appName string, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
+func AbortPodAutoScalerChaos(appsUnderTest []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {

 	// signChan channel is used to transmit signal notifications.
 	signChan := make(chan os.Signal, 1)
@ -403,14 +456,12 @@ func AbortPodAutoScalerChaos(replicaCount int, appName string, experimentsDetail
|
|||
// Other experiments have simpler "recoveries" that are more or less guaranteed to work.
|
||||
switch strings.ToLower(experimentsDetails.AppKind) {
|
||||
case "deployment", "deployments":
|
||||
|
||||
if err := AutoscalerRecoveryInDeployment(experimentsDetails, clients, replicaCount, appName); err != nil {
|
||||
if err := AutoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest); err != nil {
|
||||
log.Errorf("the recovery after abortion failed err: %v", err)
|
||||
}
|
||||
|
||||
case "statefulset", "statefulsets":
|
||||
|
||||
if err := AutoscalerRecoveryInStatefulset(experimentsDetails, clients, replicaCount, appName); err != nil {
|
||||
if err := AutoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest); err != nil {
|
||||
log.Errorf("the recovery after abortion failed err: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
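
The abort path itself follows a standard Go signal-watcher shape. A minimal sketch, with a hypothetical recoverFn callback standing in for the kind-specific recovery calls above (exit code and logging here are illustrative, not the repo's exact behaviour):

package autoscaler

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// watchForAbort blocks until SIGINT or SIGTERM arrives, then runs the supplied
// recovery function before exiting, mirroring the shape of AbortPodAutoScalerChaos.
func watchForAbort(recoverFn func() error) {
	signChan := make(chan os.Signal, 1)
	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
	<-signChan // block for the abort signal
	if err := recoverFn(); err != nil {
		log.Printf("the recovery after abortion failed err: %v", err)
	}
	os.Exit(1)
}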
@@ -190,7 +190,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 Name: "dockersocket",
                 VolumeSource: apiv1.VolumeSource{
                     HostPath: &apiv1.HostPathVolumeSource{
-                        Path: "/var/run/docker.sock",
+                        Path: experimentsDetails.SocketPath,
                     },
                 },
             },
@@ -204,7 +204,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 VolumeMounts: []apiv1.VolumeMount{
                     {
                         Name:      "dockersocket",
-                        MountPath: "/var/run/docker.sock",
+                        MountPath: experimentsDetails.SocketPath,
                     },
                 },
                 ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),

@@ -190,7 +190,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 Name: "dockersocket",
                 VolumeSource: apiv1.VolumeSource{
                     HostPath: &apiv1.HostPathVolumeSource{
-                        Path: "/var/run/docker.sock",
+                        Path: experimentsDetails.SocketPath,
                     },
                 },
             },
@@ -204,7 +204,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 VolumeMounts: []apiv1.VolumeMount{
                     {
                         Name:      "dockersocket",
-                        MountPath: "/var/run/docker.sock",
+                        MountPath: experimentsDetails.SocketPath,
                     },
                 },
                 ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),

@@ -192,7 +192,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 Name: "dockersocket",
                 VolumeSource: apiv1.VolumeSource{
                     HostPath: &apiv1.HostPathVolumeSource{
-                        Path: "/var/run/docker.sock",
+                        Path: experimentsDetails.SocketPath,
                     },
                 },
             },
@@ -206,7 +206,7 @@ func CreateHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie
                 VolumeMounts: []apiv1.VolumeMount{
                     {
                         Name:      "dockersocket",
-                        MountPath: "/var/run/docker.sock",
+                        MountPath: experimentsDetails.SocketPath,
                     },
                 },
                 ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
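
The same substitution is applied in three experiment helpers: the hard-coded docker socket becomes the configurable SocketPath, so the helper pod can also target containerd-style sockets. A condensed sketch of the resulting volume wiring (a hypothetical helper, not the repo's full pod builder):

package helper

import (
	apiv1 "k8s.io/api/core/v1"
)

// runtimeSocketVolume wires a host container-runtime socket into a helper pod
// at the same path, e.g. /var/run/docker.sock for docker or
// /run/containerd/containerd.sock for containerd.
func runtimeSocketVolume(socketPath string) (apiv1.Volume, apiv1.VolumeMount) {
	volume := apiv1.Volume{
		Name: "dockersocket",
		VolumeSource: apiv1.VolumeSource{
			HostPath: &apiv1.HostPathVolumeSource{Path: socketPath},
		},
	}
	mount := apiv1.VolumeMount{
		Name:      "dockersocket",
		MountPath: socketPath,
	}
	return volume, mount
}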
7 go.mod
@@ -4,17 +4,12 @@ go 1.13
 require (
     github.com/aws/aws-sdk-go v1.17.7
-    github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect
-    github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c // indirect
-    github.com/containerd/ttrpc v1.0.2 // indirect
-    github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
     github.com/emicklei/go-restful v2.12.0+incompatible // indirect
     github.com/go-openapi/spec v0.19.7 // indirect
     github.com/go-openapi/swag v0.19.9 // indirect
-    github.com/gogo/googleapis v1.4.0 // indirect
     github.com/google/go-cmp v0.5.2 // indirect
     github.com/google/gofuzz v1.1.0 // indirect
     github.com/imdario/mergo v0.3.9 // indirect
     github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
     github.com/kr/pretty v0.2.0 // indirect
     github.com/kyokomi/emoji v2.2.4+incompatible
     github.com/litmuschaos/chaos-operator v0.0.0-20201210172142-57fddee6734e
32 go.sum
@@ -105,7 +105,6 @@ github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 h1:a1zrFsLFac2xoM6zG1u72DWJwZG3ayttYLfmLbxVETk=
github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
@@ -113,8 +112,6 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -123,10 +120,6 @@ github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJ
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c h1:1c6xmkNiu6Jnr6AKGM91GGNsfU+nPNFvw9BZFSo0E+c=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 h1:14r0i3IeJj6zkNLigAJiv/TWSR8EY+pxIjv5tFiT+n8=
github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
@@ -144,15 +137,12 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a h1:W8b4lQ4tFF21aspRGoBuCNV6V2fFJBF+pm1J6OY8Lys=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/prometheus-operator v0.34.0/go.mod h1:Li6rMllG/hYIyXfMuvUwhyC+hqwJVHdsDdP21hypT1M=
github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
@@ -185,8 +175,6 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -311,12 +299,8 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -473,8 +457,6 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
@@ -594,8 +576,6 @@ github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830 h1:yvQ/2
github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/openebs/maya v0.0.0-20200411140727-1c81f9e017b0 h1:9o6+N3YkssQvUlmJnqNULSxsGFO/rb1we8MtYKr5ze4=
github.com/openebs/maya v0.0.0-20200411140727-1c81f9e017b0/go.mod h1:QQY9cOHKQwZ73qbv6O//UYUBLNV2S0MRDIfG7t5KOCk=
@@ -663,7 +643,6 @@ github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -701,8 +680,6 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -748,7 +725,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
@@ -894,12 +870,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -968,7 +938,6 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
@@ -978,7 +947,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -64,6 +64,8 @@ func GenerateEvents(eventsDetails *types.EventDetails, clients clients.ClientSet
     } else {
         event.LastTimestamp = metav1.Time{Time: time.Now()}
         event.Count = event.Count + 1
+        event.Source.Component = chaosDetails.ChaosPodName
+        event.Message = eventsDetails.Message
         _, err = clients.KubeClient.CoreV1().Events(chaosDetails.ChaosNamespace).Update(event)
         if err != nil {
             return err
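
For readers outside this file: GenerateEvents follows a create-or-update shape, and the two added lines keep an updated event's source and message current instead of only bumping its count. A self-contained sketch of that shape, assuming the pre-context client-go event API used here (helper name and field choices are illustrative, not the repo's exact code):

package events

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
)

// upsertEvent creates the event on first occurrence; on repeats it bumps the
// count and refreshes the timestamp, source, and message in place so the
// cluster does not accumulate duplicate events.
func upsertEvent(events corev1typed.EventInterface, name, ns, component, message string) error {
	event, err := events.Get(name, metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		_, err = events.Create(&corev1.Event{
			ObjectMeta:     metav1.ObjectMeta{Name: name, Namespace: ns},
			Source:         corev1.EventSource{Component: component},
			Message:        message,
			Count:          1,
			FirstTimestamp: metav1.Time{Time: time.Now()},
			LastTimestamp:  metav1.Time{Time: time.Now()},
		})
		return err
	} else if err != nil {
		return err
	}
	event.LastTimestamp = metav1.Time{Time: time.Now()}
	event.Count++
	event.Source.Component = component
	event.Message = message
	_, err = events.Update(event)
	return err
}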
@@ -67,4 +67,5 @@ func InitialiseChaosVariables(chaosDetails *types.ChaosDetails, experimentDetail
     chaosDetails.Delay = experimentDetails.Delay
     chaosDetails.AppDetail = appDetails
     chaosDetails.JobCleanupPolicy = Getenv("JOB_CLEANUP_POLICY", "retain")
+    chaosDetails.ProbeImagePullPolicy = experimentDetails.LIBImagePullPolicy
 }
@@ -20,6 +20,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
     experimentDetails.AppNS = Getenv("APP_NAMESPACE", "")
     experimentDetails.AppLabel = Getenv("APP_LABEL", "")
     experimentDetails.AppKind = Getenv("APP_KIND", "")
+    experimentDetails.AppAffectPercentage, _ = strconv.Atoi(Getenv("APP_AFFECT_PERC", "100"))
     experimentDetails.Replicas, _ = strconv.Atoi(Getenv("REPLICA_COUNT", ""))
     experimentDetails.ChaosUID = clientTypes.UID(Getenv("CHAOS_UID", ""))
     experimentDetails.InstanceID = Getenv("INSTANCE_ID", "")
@@ -6,21 +6,28 @@ import (
 
 // ExperimentDetails is for collecting all the experiment-related details
 type ExperimentDetails struct {
-    ExperimentName   string
-    EngineName       string
-    ChaosDuration    int
-    RampTime         int
-    Replicas         int
-    ChaosLib         string
-    AppNS            string
-    AppLabel         string
-    AppKind          string
-    ChaosUID         clientTypes.UID
-    InstanceID       string
-    ChaosNamespace   string
-    ChaosPodName     string
-    RunID            string
-    AuxiliaryAppInfo string
-    Timeout          int
-    Delay            int
+    ExperimentName      string
+    EngineName          string
+    ChaosDuration       int
+    RampTime            int
+    Replicas            int
+    ChaosLib            string
+    AppNS               string
+    AppLabel            string
+    AppKind             string
+    AppAffectPercentage int
+    ChaosUID            clientTypes.UID
+    InstanceID          string
+    ChaosNamespace      string
+    ChaosPodName        string
+    RunID               string
+    AuxiliaryAppInfo    string
+    Timeout             int
+    Delay               int
 }
+
+// ApplicationUnderTest contains the name of the deployment object and the current replica count
+type ApplicationUnderTest struct {
+    AppName      string
+    ReplicaCount int
+}
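
The new APP_AFFECT_PERC knob and the ApplicationUnderTest type suggest a selection step between target discovery and chaos injection. A hypothetical sketch of that step (the PR's actual selection logic is not shown in this hunk):

package experiment

// ApplicationUnderTest matches the type introduced above.
type ApplicationUnderTest struct {
	AppName      string
	ReplicaCount int
}

// selectTargets keeps roughly affectPerc percent of the matched applications,
// with a floor of one target so the experiment always has something to act on.
func selectTargets(all []ApplicationUnderTest, affectPerc int) []ApplicationUnderTest {
	if len(all) == 0 {
		return all
	}
	n := len(all) * affectPerc / 100
	if n < 1 {
		n = 1
	}
	return all[:n]
}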
@@ -35,6 +35,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
     experimentDetails.LIBImagePullPolicy = Getenv("LIB_IMAGE_PULL_POLICY", "Always")
     experimentDetails.TargetContainer = Getenv("TARGET_CONTAINER", "")
     experimentDetails.Sequence = Getenv("SEQUENCE", "parallel")
+    experimentDetails.SocketPath = Getenv("SOCKET_PATH", "/var/run/docker.sock")
 
 }
@@ -32,5 +32,6 @@ type ExperimentDetails struct {
     Annotations     map[string]string
     TargetContainer string
     Sequence        string
+    SocketPath      string
     Resources       corev1.ResourceRequirements
 }
@@ -34,6 +34,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
     experimentDetails.PodsAffectedPerc, _ = strconv.Atoi(Getenv("PODS_AFFECTED_PERC", "0"))
     experimentDetails.Sequence = Getenv("SEQUENCE", "parallel")
     experimentDetails.VolumeMountPath = Getenv("VOLUME_MOUNT_PATH", "")
+    experimentDetails.SocketPath = Getenv("SOCKET_PATH", "/var/run/docker.sock")
 
 }
@@ -31,5 +31,6 @@ type ExperimentDetails struct {
     PodsAffectedPerc int
     Sequence         string
     VolumeMountPath  string
+    SocketPath       string
     Resources        corev1.ResourceRequirements
 }
@@ -34,6 +34,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
     experimentDetails.LIBImagePullPolicy = Getenv("LIB_IMAGE_PULL_POLICY", "Always")
     experimentDetails.TargetContainer = Getenv("TARGET_CONTAINER", "")
     experimentDetails.Sequence = Getenv("SEQUENCE", "parallel")
+    experimentDetails.SocketPath = Getenv("SOCKET_PATH", "/var/run/docker.sock")
 }
 
 // Getenv fetch the env and set the default value, if any
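
The Getenv helper referenced by the comment above is a common default-value wrapper. One plausible shape, as a sketch rather than the repo's exact implementation:

package environment

import "os"

// Getenv returns the value of key, or fallback when the variable is unset.
func Getenv(key, fallback string) string {
	value, ok := os.LookupEnv(key)
	if !ok {
		return fallback
	}
	return value
}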
@@ -31,5 +31,6 @@ type ExperimentDetails struct {
     Annotations     map[string]string
     TargetContainer string
     Sequence        string
+    SocketPath      string
     Resources       corev1.ResourceRequirements
 }
@@ -142,7 +142,7 @@ func CreateProbePod(clients clients.ClientSets, chaosDetails *types.ChaosDetails
             {
                 Name:  chaosDetails.ExperimentName + "-probe",
                 Image: source,
-                ImagePullPolicy: apiv1.PullAlways,
+                ImagePullPolicy: apiv1.PullPolicy(chaosDetails.ProbeImagePullPolicy),
                 Command: []string{
                     "/bin/sh",
                 },
@@ -72,12 +72,12 @@ func TriggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets,
     switch probe.Operation {
     case "create", "Create":
         if err = CreateResource(probe, gvr, clients); err != nil {
-            log.Errorf("The %v k8s probe has been Failed, err: %v", probe.Name, err)
+            log.Errorf("The %v k8s probe has Failed, err: %v", probe.Name, err)
             return err
         }
     case "delete", "Delete":
         if err = DeleteResource(probe, gvr, clients); err != nil {
-            log.Errorf("The %v k8s probe has been Failed, err: %v", probe.Name, err)
+            log.Errorf("The %v k8s probe has Failed, err: %v", probe.Name, err)
             return err
         }
     case "present", "Present":
@@ -86,7 +86,7 @@ func TriggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets,
             LabelSelector: cmd.LabelSelector,
         })
         if err != nil || len(resourceList.Items) == 0 {
-            log.Errorf("The %v k8s probe has been Failed, err: %v", probe.Name, err)
+            log.Errorf("The %v k8s probe has Failed, err: %v", probe.Name, err)
             return fmt.Errorf("unable to list the resources with matching selector, err: %v", err)
         }
     case "absent", "Absent":
@@ -98,7 +98,7 @@ func TriggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets,
             return fmt.Errorf("unable to list the resources with matching selector, err: %v", err)
         }
         if len(resourceList.Items) != 0 {
-            log.Errorf("The %v k8s probe has been Failed, err: %v", probe.Name, err)
+            log.Errorf("The %v k8s probe has Failed, err: %v", probe.Name, err)
             return fmt.Errorf("Resource is not deleted yet due to, err: %v", err)
         }
     default:
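
Both the "present" and "absent" branches reduce to the same primitive: list resources by selector with the dynamic client and assert on whether anything matched. A minimal sketch of that primitive, assuming the pre-context dynamic-client signatures used in this module (helper name is hypothetical):

package probe

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// resourceExists reports whether any resource of the given GVR matches the
// selectors: "present" probes require true, "absent" probes require false.
func resourceExists(client dynamic.Interface, gvr schema.GroupVersionResource, ns, fieldSelector, labelSelector string) (bool, error) {
	list, err := client.Resource(gvr).Namespace(ns).List(metav1.ListOptions{
		FieldSelector: fieldSelector,
		LabelSelector: labelSelector,
	})
	if err != nil {
		return false, err
	}
	return len(list.Items) > 0, nil
}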
@@ -148,7 +148,6 @@ func CreateResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResou
     if err != nil {
         return err
     }
 
     _, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Command.Namespace).Create(data, v1.CreateOptions{})
 
     return err
@@ -47,8 +47,8 @@ func CheckAuxiliaryApplicationStatus(AuxiliaryAppDetails string, timeout, delay
     return nil
 }
 
-// CheckPodStatus checks the running status of the application pod
-func CheckPodStatus(appNs, appLabel string, timeout, delay int, clients clients.ClientSets) error {
+// CheckPodStatusPhase checks the status of the application pod
+func CheckPodStatusPhase(appNs, appLabel string, timeout, delay int, clients clients.ClientSets, states ...string) error {
     err := retry.
         Times(uint(timeout / delay)).
         Wait(time.Duration(delay) * time.Second).
@@ -58,10 +58,17 @@ func CheckPodStatus(appNs, appLabel string, timeout, delay int, clients clients.
                 return errors.Errorf("Unable to find the pods with matching labels, err: %v", err)
             }
             for _, pod := range podSpec.Items {
-                if string(pod.Status.Phase) != "Running" {
-                    return errors.Errorf("Pod is not yet in running state")
+                isInState := false
+                for _, state := range states {
+                    if string(pod.Status.Phase) == state {
+                        isInState = true
+                        break
+                    }
                 }
-                log.InfoWithValues("[Status]: The running status of Pods are as follows", logrus.Fields{
+                if !isInState {
+                    return errors.Errorf("Pod is not yet in targeted state")
+                }
+                log.InfoWithValues("[Status]: The status of Pods are as follows", logrus.Fields{
                     "Pod": pod.Name, "Status": pod.Status.Phase})
             }
             return nil
@@ -72,6 +79,11 @@ func CheckPodStatus(appNs, appLabel string, timeout, delay int, clients clients.
     return nil
 }
 
+// CheckPodStatus checks the running status of the application pod
+func CheckPodStatus(appNs, appLabel string, timeout, delay int, clients clients.ClientSets) error {
+    return CheckPodStatusPhase(appNs, appLabel, timeout, delay, clients, "Running")
+}
+
 // CheckContainerStatus checks the status of the application container
 func CheckContainerStatus(appNs, appLabel string, timeout, delay int, clients clients.ClientSets) error {
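
With this refactor, CheckPodStatus keeps its old contract while CheckPodStatusPhase exposes the general form. Hypothetical call sites (the extra states shown are an assumption for illustration, not calls made in this PR):

// Old contract, unchanged for existing callers: every matched pod must be Running.
err := CheckPodStatus(appNs, appLabel, timeout, delay, clients)

// Generalized form: accept any of several phases, e.g. pods that are either
// still Running or have already Succeeded.
err = CheckPodStatusPhase(appNs, appLabel, timeout, delay, clients, "Running", "Succeeded")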
@@ -65,17 +65,18 @@ type EventDetails struct {
 
 // ChaosDetails is for collecting all the global variables
 type ChaosDetails struct {
-    ChaosUID         clientTypes.UID
-    ChaosNamespace   string
-    ChaosPodName     string
-    EngineName       string
-    InstanceID       string
-    ExperimentName   string
-    Timeout          int
-    Delay            int
-    AppDetail        AppDetails
-    ChaosDuration    int
-    JobCleanupPolicy string
+    ChaosUID             clientTypes.UID
+    ChaosNamespace       string
+    ChaosPodName         string
+    EngineName           string
+    InstanceID           string
+    ExperimentName       string
+    Timeout              int
+    Delay                int
+    AppDetail            AppDetails
+    ChaosDuration        int
+    JobCleanupPolicy     string
+    ProbeImagePullPolicy string
 }
 
 // AppDetails contains all the application related envs