diff --git a/tests/fio/fio.yaml b/tests/fio/fio.yaml new file mode 100644 index 000000000..5b7d7d7b6 --- /dev/null +++ b/tests/fio/fio.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: fio +spec: + template: + metadata: + name: fio + labels: + name: fio + spec: + restartPolicy: Never + nodeSelector: + kubernetes.io/hostname: testNode + containers: + - name: perfrunner + image: openebs/tests-fio + command: ["/bin/bash"] + args: ["-c", "./fio_runner.sh --template file/basic-rw --size 256m --duration 60; exit 0"] + volumeMounts: + - mountPath: /datadir + name: fio-vol + tty: true + volumes: + - name: fio-vol + persistentVolumeClaim: + claimName: testClaim +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: testClaim +spec: + storageClassName: testClass + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "5G" diff --git a/tests/fio/run_litmus_test.yaml b/tests/fio/run_litmus_test.yaml new file mode 100644 index 000000000..ee6473cfd --- /dev/null +++ b/tests/fio/run_litmus_test.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: litmus + namespace: litmus +spec: + template: + metadata: + name: litmus + spec: + serviceAccountName: litmus + restartPolicy: Never + containers: + - name: ansibletest + image: openebs/ansible-runner + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: log_plays + + - name: PROVIDER_STORAGE_CLASS + value: openebs-standard + #value: local-storage + + - name: APP_NODE_SELECTOR + value: kubeminion-01 + + - name: FIO_TEST_PROFILE + value: standard-ssd + + - name: FIO_SAMPLE_SIZE + value: "128m" + + - name: FIO_TESTRUN_PERIOD + value: "60" + + command: ["/bin/bash"] + args: ["-c", "ansible-playbook ./fio/test.yaml -i /etc/ansible/hosts -v; exit 0"] + volumeMounts: + - name: logs + mountPath: /var/log/ansible + tty: true + - name: logger + image: openebs/logger + command: ["/bin/bash"] + args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,fio; exit 0"] + 
volumeMounts: + - name: kubeconfig + mountPath: /root/admin.conf + subPath: admin.conf + - name: logs + mountPath: /mnt + tty: true + volumes: + - name: kubeconfig + configMap: + name: kubeconfig + - name: logs + hostPath: + path: /mnt + type: Directory + diff --git a/tests/fio/test.yaml b/tests/fio/test.yaml new file mode 100644 index 000000000..36e7357fe --- /dev/null +++ b/tests/fio/test.yaml @@ -0,0 +1,126 @@ +# TODO +# Change pod status checks to container status checks (containerStatuses) +# O/P result + +- hosts: localhost + connection: local + + vars_files: + - test_vars.yaml + + tasks: + - block: + + ## VERIFY AVAILABILITY OF SELECTED STORAGE CLASS + + - name: Check whether the provider storageclass is applied + shell: kubectl get sc {{ lookup('env','PROVIDER_STORAGE_CLASS') }} + args: + executable: /bin/bash + register: result + + ## PRE-CONDITION THE APPLICATION DEPLOYMENT SPECS WITH TEST PARAMS + + - name: Replace the app node placeholder with perf-intensive node + replace: + path: "{{ pod_yaml_alias }}" + regexp: "testNode" + replace: "{{ lookup('env','APP_NODE_SELECTOR') }}" + + - name: Replace the pvc placeholder with test param + replace: + path: "{{ pod_yaml_alias }}" + regexp: "testClaim" + replace: "{{ test_name }}" + + - name: Replace the storageclass placeholder with provider + replace: + path: "{{ pod_yaml_alias }}" + regexp: "testClass" + replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}" + + # FIO-SPECIFIC PRE-CONDITIONING + + - name: Replace the default fio profile with user-defined profile + replace: + path: "{{ pod_yaml_alias }}" + regexp: "basic-rw" + replace: "{{ lookup('env','FIO_TEST_PROFILE') }}" + + - name: Replace the data sample size with user-defined size + replace: + path: "{{ pod_yaml_alias }}" + regexp: "256m" + replace: "{{ lookup('env','FIO_SAMPLE_SIZE') }}" + + - name: Replace the default I/O test duration with user-defined period + replace: + path: "{{ pod_yaml_alias }}" + regexp: "60" + replace: "{{ 
lookup('env','FIO_TESTRUN_PERIOD') }}" + + ## RUN FIO WORKLOAD TEST + + - name: Deploy fio test job + shell: kubectl apply -f {{ pod_yaml_alias }} -n litmus + args: + executable: /bin/bash + + - name: Confirm fio pod status is running + shell: > + kubectl get pods -l name=fio -n litmus + --no-headers + args: + executable: /bin/bash + register: result + until: "'fio' in result.stdout and 'Running' in result.stdout" + delay: 120 + retries: 15 + + - name: Obtain name of fio pod + set_fact: + fio_pod_name: "{{ result.stdout.split()[0] }}" + + - name: Wait for fio pod to proceed with workload + wait_for: + timeout: 60 + + - name: Re-Check fio pod status + shell: > + kubectl get pod {{ fio_pod_name }} -n litmus + --no-headers -o custom-columns=:status.phase + args: + executable: /bin/bash + register: result + failed_when: "result.stdout not in ['Running', 'Succeeded']" + + - name: Confirm fio job completion status (poll & wait for upperbound of 60 min) + shell: > + kubectl get pod {{ fio_pod_name }} -n litmus + --no-headers -o custom-columns=:status.phase + args: + executable: /bin/bash + register: result + until: "'Succeeded' in result.stdout" + delay: 120 + retries: 30 + + - name: Verify the fio logs to check if run is complete w/o errors + shell: > + kubectl logs {{ fio_pod_name }} -n litmus + | grep -i error | cut -d ":" -f 2 + | sort | uniq + args: + executable: /bin/bash + register: result + failed_when: result.stdout != " 0," + + - set_fact: + flag: "Pass" + + rescue: + - set_fact: + flag: "Fail" + + always: + - include: test_cleanup.yaml diff --git a/tests/fio/test_cleanup.yaml b/tests/fio/test_cleanup.yaml new file mode 100644 index 000000000..ad11dee34 --- /dev/null +++ b/tests/fio/test_cleanup.yaml @@ -0,0 +1,46 @@ +--- +- name: Get pvc name to verify successful pvc deletion + shell: > + kubectl get pvc {{ test_name }} + -o custom-columns=:spec.volumeName -n litmus + --no-headers + args: + executable: /bin/bash + register: pv + +- name: Delete fio job + shell: > + source 
~/.profile; kubectl delete -f {{ pod_yaml_alias }} + -n litmus + args: + executable: /bin/bash + +- name: Confirm fio pod has been deleted + shell: source ~/.profile; kubectl get pods -n litmus + args: + executable: /bin/bash + register: result + until: "'fio' not in result.stdout" + delay: 30 + retries: 12 + +- block: + - name: Confirm pvc pod has been deleted + shell: > + kubectl get pods -n litmus | grep {{ pv.stdout }} + args: + executable: /bin/bash + register: result + failed_when: "'pvc' and 'Running' in result.stdout" + delay: 30 + retries: 12 + when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')" + +- block: + - name: Remove the local persistent volume + shell: kubectl delete pv {{ pv.stdout }} + args: + executable: /bin/bash + register: result + failed_when: "'persistentvolume' and 'deleted' not in result.stdout" + when: "'local-storage' in lookup('env','PROVIDER_STORAGE_CLASS')" diff --git a/tests/fio/test_vars.yaml b/tests/fio/test_vars.yaml new file mode 100644 index 000000000..dd0070eed --- /dev/null +++ b/tests/fio/test_vars.yaml @@ -0,0 +1,17 @@ +--- +## TEST-SPECIFIC PARAMS + +test_name: fio-benchmark +pod_yaml_alias: fio.yaml + +## PROVIDER-SPECIFIC PARARMS + +# OpenEBS + +openebs_operator: + - maya-apiserver + - openebs-provisioner + +# Local Volume + +local_pv_name: local-pv diff --git a/tools/fio/fio_runner.sh b/tools/fio/fio_runner.sh index 3df1fe848..dd4fc1127 100755 --- a/tools/fio/fio_runner.sh +++ b/tools/fio/fio_runner.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash ####################################################################################################################### # Script Name : bench_runner.sh @@ -8,75 +8,96 @@ # Script Author : Karthik ####################################################################################################################### -TEST_TEMPLATES="Basic" +TEST_TEMPLATE="file/basic-rw" +TEST_DIR="datadir" +TEST_SIZE="256m" TEST_DURATION=60 -TEST_DIR="datadir1" -IS_LOCAL_DIR=1 # 
Function definition -# Use this to update the size of the volume -updateTemplates() { - echo "update templates" - for i in `ls templates/${TEST_TEMPLATES}/File*`; do sed -e "s|directory=/datadir1|directory=/$TEST_DIR|g" -i $i; done - for i in `ls templates/${TEST_TEMPLATES}/File*`; do sed -e "s|runtime=60|runtime=$TEST_DURATION|g" -i $i; done - for i in `ls templates/${TEST_TEMPLATES}/File*`; do cat $i; done +function show_help(){ + cat << EOF +Usage : $(basename "$0") --template + $(basename "$0") --size + $(basename "$0") --duration + $(basename "$0") --help + +-h|--help Display this help and exit +--template Select the fio template to run +--size Provide the data sample size (in MB) +--duration Duration (in sec) + +Example: ./fio_runner.sh --template file/basic-rw --size 1024m --duration 120 + +EOF } +while [[ $# -gt 0 ]] +do + case $1 in + -h|-\?|--help) # Display usage summary + show_help + exit + ;; + + --template) # Optional argument to specify fio profile + if [[ -n $2 ]]; then + TEST_TEMPLATE=$2 + if ! ls templates/$TEST_TEMPLATE > /dev/null 2>&1; then + echo "ERROR: Template specified does not exist" + exit 1 + fi + shift + else + echo 'ERROR: "--template" requires a valid fio profile' + exit 1 + fi + ;; -if [ $# -gt 0 ]; -then - echo "Setting the test templates as $1" - TEST_TEMPLATES=$1 - if [ ! -d "templates/$TEST_TEMPLATES" ]; then - echo "Specified templates do not exist." 
+ --size) # Optional argument to specify data sample size + if [[ -n $2 ]]; then + TEST_SIZE=$2 + shift + else + echo 'ERROR: "--size" requires a valid data sample size in MB' + exit 1 + fi + ;; + + --duration) # Optional argument to specify fio run duration + if [[ -n $2 ]]; then + TEST_DURATION=$2 + shift + else + echo 'ERROR: "--duration" requires a valid time period in sec' + exit 1 + fi + ;; + + --) # End of all options + shift + break + ;; + + *) # Default case: If no options, so break out of the loop + break + esac + shift + +done + +#Verify that the datadir used by the templates is mounted +if ! df -h -P | grep -q datadir > /dev/null 2>&1; then + echo -e "datadir not mounted successfully, exiting \n" exit 1 - fi fi -if [ $# -gt 1 ]; -then - echo "Setting the duration for tests as $2" - TEST_DURATION=$2 -fi - -if [ $# -gt 2 ]; -then - echo "Setting the test to local directory /tmp/$3" - TEST_DIR="tmp/$3" - IS_LOCAL_DIR=2 -fi - - -#Verify that the datadir1 used by the templates is mounted -if [ 2 -ne $IS_LOCAL_DIR ]; then - df -h -P | grep -q datadir1 - if [ `echo $?` -ne 0 ]; then - echo -e "datadir1 not mounted successfully, exiting \n" - exit - else - echo "datadir1 mounted successfully" - fi -else - mkdir -p /$TEST_DIR - if [ `echo $?` -ne 0 ]; then - echo -e "/$TEST_DIR could not be created, exiting \n" - exit - fi - rm -rf /$TEST_DIR/* - echo "using /$TEST_DIR for testing" -fi - -updateTemplates - -# Start vdbench I/O iterating through each template file +# Start fio I/O iterating through each template file timestamp=`date +%d%m%Y_%H%M%S` -echo -e "Running $TEST_TEMPLATES Workloads\n" - -for i in `ls templates/${TEST_TEMPLATES}/ | cut -d "/" -f 3` +for i in `ls templates/${TEST_TEMPLATE}` do - echo "######## Starting workload -- $i#######" - fio --eta-newline=2s templates/${TEST_TEMPLATES}/$i - echo "######## Ended workload -- $i#######" + profile=$(basename $i) + echo -e "\nRunning $profile test with size=$TEST_SIZE, runtime=$TEST_DURATION... 
Wait for results !!\n" + fio $i --size=$TEST_SIZE --runtime=$TEST_DURATION --output-format=json done diff --git a/tools/fio/templates/Basic/File-BasicReadWrite b/tools/fio/templates/file/basic-rw similarity index 57% rename from tools/fio/templates/Basic/File-BasicReadWrite rename to tools/fio/templates/file/basic-rw index 5eb31dae5..5b9631f81 100644 --- a/tools/fio/templates/Basic/File-BasicReadWrite +++ b/tools/fio/templates/file/basic-rw @@ -1,10 +1,8 @@ [global] -directory=/datadir1 -filesize=16m - +directory=/datadir +filename=basic.test.file [basic-readwrite] rw=readwrite bs=4k time_based=1 -runtime=60 diff --git a/tools/fio/templates/file/standard-ssd b/tools/fio/templates/file/standard-ssd new file mode 100644 index 000000000..2819a5bee --- /dev/null +++ b/tools/fio/templates/file/standard-ssd @@ -0,0 +1,32 @@ +# Do some important numbers on SSD drives, to gauge what kind of +# performance you might get out of them. +# +# Sequential read and write speeds are tested, these are expected to be +# high. Random reads should also be fast, random writes are where crap +# drives are usually separated from the good drives. +# +# This uses a queue depth of 32 (As this is more representative of mid-heavy disk use) + +[global] +bs=4k +ioengine=libaio +iodepth=1 +direct=1 +directory=/datadir +filename=ssd.test.file + +[seq-read] +rw=read +stonewall + +[rand-read] +rw=randread +stonewall + +[seq-write] +rw=write +stonewall + +[rand-write] +rw=randwrite +stonewall