chore[cleanup]: remove unused files (#606)

Signed-off-by: nsathyaseelan <sathyaseelan.n@mayadata.io>
This commit is contained in:
sathyaseelan 2019-04-30 16:40:01 +05:30 committed by giri
parent 3a9cd94f02
commit 58a4a40b0c
179 changed files with 0 additions and 38721 deletions

Gopkg.lock generated

@ -1,95 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:63e35f61c1d659416dc2d27d873689a9aef81701fd077fa4686e3d6edccac3dc"
name = "github.com/DATA-DOG/godog"
packages = [
".",
"colors",
"gherkin",
]
pruneopts = "UT"
revision = "0371765570d36374bef4ab9f62ed0491f0862a3c"
version = "v0.7.6"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "UT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8"
name = "github.com/golang/mock"
packages = ["gomock"]
pruneopts = "UT"
revision = "c34cdb4725f4c3844d095133c6e40e448b86589b"
version = "v1.1.1"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:c40d65817cdd41fac9aa7af8bed56927bb2d6d47e4fea566a74880f5c2b1c41e"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
]
pruneopts = "UT"
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
name = "golang.org/x/net"
packages = ["context"]
pruneopts = "UT"
revision = "04a2e542c03f1d053ab3e4d6e5abcd4b66e2be8e"
[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/DATA-DOG/godog",
"github.com/DATA-DOG/godog/gherkin",
"github.com/ghodss/yaml",
"github.com/golang/mock/gomock",
"github.com/pkg/errors",
"github.com/stretchr/testify/require",
]
solver-name = "gps-cdcl"
solver-version = 1


@ -1,38 +0,0 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/DATA-DOG/godog"
version = "0.7.6"
[prune]
go-tests = true
unused-packages = true
[[constraint]]
name = "github.com/golang/mock"
version = "1.1.1"


@ -1,57 +0,0 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: jupyter-server
spec:
replicas: 1
template:
metadata:
labels:
lkey: lvalue
spec:
containers:
- name: jupyter-server
imagePullPolicy: Always
image: satyamz/docker-jupyter:v0.4
ports:
- containerPort: 8888
env:
- name: GIT_REPO
value: https://github.com/vharsh/plot-demo.git
volumeMounts:
- name: data-vol
mountPath: /mnt/data
volumes:
- name: data-vol
persistentVolumeClaim:
claimName: testclaim
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: testclaim
spec:
storageClassName: testclass
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5G
---
apiVersion: v1
kind: Service
metadata:
name: jupyter-service
labels:
lkey: lvalue
spec:
ports:
- name: ui
port: 8888
nodePort: 32424
protocol: TCP
selector:
lkey: lvalue
sessionAffinity: None
type: NodePort


@ -1,44 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
generateName: litmus-jupyter-
namespace: litmus
spec:
template:
metadata:
name: litmus
labels:
app: jupyter-deployment-litmus
spec:
serviceAccountName: litmus
restartPolicy: Never
containers:
- name: ansibletest
image: openebs/ansible-runner:ci
imagePullPolicy: Always
env:
- name: ANSIBLE_STDOUT_CALLBACK
#value: log_plays, actionable, default
value: default
- name: PROVIDER_STORAGE_CLASS
# Supported values: openebs-standard, local-storage
value: openebs-standard
- name: APP_PVC
value: jupyter-data-vol-claim
# Application label
- name: APP_LABEL
value: 'app=jupyter-server'
# Application namespace
- name: APP_NAMESPACE
value: app-jupyter-ns
command: ["/bin/bash"]
args: ["-c", "ansible-playbook ./jupyter/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"]


@ -1,40 +0,0 @@
---
- hosts: localhost
connection: local
vars_files:
- test_vars.yml
tasks:
- block:
## Generating the testname for deployment
- include_tasks: /common/utils/create_testname.yml
## RECORD START-OF-TEST IN LITMUS RESULT CR
- include_tasks: /common/utils/update_litmus_result_resource.yml
vars:
status: 'SOT'
## Actual test to deploy Jupyter Notebook on k8s
## Creating namespaces and preparing the application for deployment
- include_tasks: /common/utils/pre_create_app_deploy.yml
## Deploying the application
- include_tasks: /common/utils/deploy_single_app.yml
vars:
delay: 60
retries: 20
- set_fact:
flag: "Pass"
rescue:
- set_fact:
flag: "Fail"
always:
## RECORD END-OF-TEST IN LITMUS RESULT CR
- include_tasks: /common/utils/update_litmus_result_resource.yml
vars:
status: 'EOT'


@ -1,7 +0,0 @@
# Test-specific parameters
application_deployment: jupyter_openebs.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: jupyter-deployment
application_name: "jupyter_Notebook"


@ -1,21 +0,0 @@
### Test Minio deployment in Kubernetes
#### Use-Case
Feature: Test deployment of Minio on Kubernetes PV
In order to test deployment of Minio on Kubernetes PV
As an end user
I need to be able to launch Minio on Kubernetes PV
#### Implementation
- Step 1: Describe the scenario(s) in the **e2e.feature** file
- Step 2: Run **godog e2e.feature**
- Step 3: Implement the undefined steps (also referred to as snippets) in the **e2e_test.go** file (a sketch follows this list)
- Step 4: Re-run **godog e2e.feature**
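For Step 3, a minimal, hypothetical sketch of how one generated snippet might be filled in is shown below; the step text comes from **e2e.feature**, while the function body is only a placeholder (the full implementation appears in the **e2e_test.go** file later in this commit):
```go
// Minimal sketch of implementing a godog snippet (godog v0.7.x API).
// Running `godog e2e.feature` prints undefined steps as stubs returning
// godog.ErrPending; the body below is a hypothetical placeholder.
package main

import "github.com/DATA-DOG/godog"

func iLaunchMinioApplicationOnVolume() error {
	// Placeholder: apply the Minio manifest (e.g. via kubectl) here.
	return godog.ErrPending
}

func FeatureContext(s *godog.Suite) {
	s.Step(`^I launch minio application on volume$`, iLaunchMinioApplicationOnVolume)
}
```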
#### Best Practices
- 1: Make use of standard Go practices
- 2: Transform the use case into structure(s) & their properties
- 3: Now fit the godog-generated function snippets into the above structures' methods
#### References
- https://github.com/DATA-DOG/godog


@ -1,76 +0,0 @@
# For k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: odm-minio
namespace: litmus
spec:
selector:
matchLabels:
app: minio
strategy:
type: Recreate
template:
metadata:
labels:
# Label is used as selector in the service.
app: minio
spec:
# Refer to the PVC
volumes:
- name: storage
persistentVolumeClaim:
# Name of the PVC created earlier
claimName: odm-minio
containers:
- name: minio
# Pulls the default Minio image from Docker Hub
image: minio/minio:latest
args:
- server
- /storage
env:
# Minio access key and secret key
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
ports:
- containerPort: 9000
hostPort: 9000
# Mount the volume into the pod
volumeMounts:
- name: storage # must match the volume name, above
mountPath: "/home/username"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: odm-minio
namespace: litmus
labels:
app: minio-storage-claim
spec:
storageClassName: openebs-standalone
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10G
---
apiVersion: v1
kind: Service
metadata:
name: odm-minio
namespace: litmus
spec:
ports:
- port: 9000
nodePort: 32701
protocol: TCP
selector:
app: minio
sessionAffinity: None
type: NodePort


@ -1,19 +0,0 @@
Feature: Test deployment of Minio on Kubernetes PV
In order to test deployment of Minio on Kubernetes PV
As an end user
I need to be able to launch Minio on Kubernetes PV
Scenario: launch Minio on PV
Given I have a kubernetes cluster with volume operator installed
When I launch minio application on volume
Then wait for "180s"
And verify minio application is launched successfully on volume
And verify PVC is bound
And verify PV is deployed
Scenario: delete Minio instance
Given minio application is launched successfully on volume
When I delete minio instance along with volume
Then wait for "60s"
And verify minio application is deleted
And verify PV is deleted


@ -1,274 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
"github.com/openebs/litmus/pkg/kubectl"
"github.com/openebs/litmus/pkg/time"
"github.com/openebs/litmus/pkg/verify"
)
// errorIdentity is a type to set error identities
type errorIdentity string
const (
// OperatorVerifyFileEI stores the actual error during load of volume
// operator verify file
OperatorVerifyFileEI errorIdentity = "operator-verify-file-err"
// ApplicationVerifyFileEI stores the actual error during load of application
// verify file
ApplicationVerifyFileEI errorIdentity = "application-verify-file-err"
// VolumeVerifyFileEI stores the actual error during load of volume verify
// file
VolumeVerifyFileEI errorIdentity = "volume-verify-file-err"
)
const (
// OperatorMF enables litmus to run checks & actions based on this volume
// operator verify file
OperatorMF verify.VerifyFile = "/etc/e2e/operator-verify/operator-verify.yaml"
// ApplicationMF enables litmus to run checks & actions based on this application
// verify file
ApplicationMF verify.VerifyFile = "/etc/e2e/application-verify/application-verify.yaml"
// VolumeMF enables litmus to run checks & actions based on this volume verify
// file
VolumeMF verify.VerifyFile = "/etc/e2e/volume-verify/volume-verify.yaml"
)
const (
// ApplicationKF is launched via kubectl
ApplicationKF kubectl.KubectlFile = "/etc/e2e/application-launch/application-launch.yaml"
)
const (
// PVCAlias is the alias name given to an application's pvc
//
// This is the text which is typically understood by the end user. This text
// which will be set in the verify file against a particular component.
// Verification logic will filter the component based on this alias & run
// various checks &/or actions
PVCAlias string = "pvc"
)
type MinioLaunch struct {
// appVerifier instance enables verification of application components
appVerifier verify.AllVerifier
// volVerifier instance enables verification of persistent volume components
volVerifier verify.AllVerifier
// operatorVerifier instance enables verification of volume operator components
operatorVerifier verify.DeployRunVerifier
// errors hold the previous error(s)
errors map[errorIdentity]error
}
func (e2e *MinioLaunch) withOperatorVerifier(f *gherkin.Feature) {
o, err := verify.NewKubeInstallVerify(OperatorMF)
if err != nil {
e2e.errors[OperatorVerifyFileEI] = err
return
}
e2e.operatorVerifier = o
}
func (e2e *MinioLaunch) withApplicationVerifier(f *gherkin.Feature) {
a, err := verify.NewKubeInstallVerify(ApplicationMF)
if err != nil {
e2e.errors[ApplicationVerifyFileEI] = err
return
}
e2e.appVerifier = a
}
func (e2e *MinioLaunch) withVolumeVerifier(f *gherkin.Feature) {
v, err := verify.NewKubeInstallVerify(VolumeMF)
if err != nil {
e2e.errors[VolumeVerifyFileEI] = err
return
}
e2e.volVerifier = v
}
func (e2e *MinioLaunch) tearDown(f *gherkin.Feature) {
kubectl.New().Run([]string{"delete", "-f", string(ApplicationKF)})
}
func (e2e *MinioLaunch) iHaveAKubernetesClusterWithVolumeOperatorInstalled() (err error) {
kconnVerifier := verify.NewKubeConnectionVerify()
// checks if kubernetes cluster is available & is connected
_, err = kconnVerifier.IsConnected()
if err != nil {
return
}
if e2e.operatorVerifier == nil {
err = fmt.Errorf("nil operator verifier: possible error '%s'", e2e.errors[OperatorVerifyFileEI])
return
}
// checks if operator is deployed
_, err = e2e.operatorVerifier.IsDeployed()
if err != nil {
return
}
// checks if operator is running
_, err = e2e.operatorVerifier.IsRunning()
return
}
func (e2e *MinioLaunch) waitFor(duration string) (err error) {
err = time.WaitFor(duration)
return
}
func (e2e *MinioLaunch) iLaunchMinioApplicationOnVolume() (err error) {
// do a kubectl apply of application yaml
_, err = kubectl.New().Run([]string{"apply", "-f", string(ApplicationKF)})
return
}
func (e2e *MinioLaunch) verifyMinioApplicationIsLaunchedSuccessfullyOnVolume() (err error) {
err = e2e.verifyApplicationIsRunning()
if err != nil {
return
}
// check if volume is running
return e2e.verifyAllVolumeReplicasAreRunning()
}
func (e2e *MinioLaunch) verifyApplicationIsRunning() (err error) {
if e2e.appVerifier == nil {
err = fmt.Errorf("nil application verifier: possible error '%s'", e2e.errors[ApplicationVerifyFileEI])
return
}
// is application deployed
_, err = e2e.appVerifier.IsDeployed()
if err != nil {
return
}
// is application running
_, err = e2e.appVerifier.IsRunning()
return
}
func (e2e *MinioLaunch) verifyAllVolumeReplicasAreRunning() (err error) {
if e2e.volVerifier == nil {
err = fmt.Errorf("nil volume verifier: possible error '%s'", e2e.errors[VolumeVerifyFileEI])
return
}
// is volume deployed
_, err = e2e.volVerifier.IsDeployed()
if err != nil {
return
}
// is volume running
_, err = e2e.volVerifier.IsRunning()
return
}
func (e2e *MinioLaunch) minioApplicationIsLaunchedSuccessfullyOnVolume() (err error) {
// check if application is running
return e2e.verifyMinioApplicationIsLaunchedSuccessfullyOnVolume()
}
func (e2e *MinioLaunch) verifyPVCIsBound() (err error) {
if e2e.appVerifier == nil {
err = fmt.Errorf("nil application verifier: possible error '%s'", e2e.errors[ApplicationVerifyFileEI])
return
}
// is condition satisfied
_, err = e2e.appVerifier.IsCondition(PVCAlias, verify.PVCBoundCond)
return
}
func (e2e *MinioLaunch) verifyPVIsDeployed() (err error) {
if e2e.volVerifier == nil {
err = fmt.Errorf("nil volume verifier: possible error '%s'", e2e.errors[VolumeVerifyFileEI])
return
}
// is volume deployed
_, err = e2e.volVerifier.IsDeployed()
if err != nil {
return
}
// is volume running
_, err = e2e.volVerifier.IsRunning()
return
}
func (e2e *MinioLaunch) iDeleteMinioInstanceAlongWithVolume() (err error) {
kubectl.New().Run([]string{"delete", "-f", string(ApplicationKF)})
return
}
func (e2e *MinioLaunch) verifyMinioApplicationIsDeleted() (err error) {
if e2e.appVerifier == nil {
err = fmt.Errorf("nil application verifier: possible error '%s'", e2e.errors[ApplicationVerifyFileEI])
return
}
// is application deleted
_, err = e2e.appVerifier.IsDeleted()
return
}
func (e2e *MinioLaunch) verifyPVIsDeleted() (err error) {
if e2e.volVerifier == nil {
err = fmt.Errorf("nil volume verifier: possible error '%s'", e2e.errors[VolumeVerifyFileEI])
return
}
// is volume deployed
_, err = e2e.volVerifier.IsDeleted()
return
}
func FeatureContext(s *godog.Suite) {
e2e := &MinioLaunch{
errors: map[errorIdentity]error{},
}
s.BeforeFeature(e2e.withOperatorVerifier)
s.BeforeFeature(e2e.withApplicationVerifier)
s.BeforeFeature(e2e.withVolumeVerifier)
s.AfterFeature(e2e.tearDown)
s.Step(`^I have a kubernetes cluster with volume operator installed$`, e2e.iHaveAKubernetesClusterWithVolumeOperatorInstalled)
s.Step(`^wait for "([^"]*)"$`, e2e.waitFor)
s.Step(`^I launch minio application on volume$`, e2e.iLaunchMinioApplicationOnVolume)
s.Step(`^verify minio application is launched successfully on volume$`, e2e.verifyMinioApplicationIsLaunchedSuccessfullyOnVolume)
s.Step(`^verify PVC is bound$`, e2e.verifyPVCIsBound)
s.Step(`^verify PV is deployed$`, e2e.verifyPVIsDeployed)
s.Step(`^I delete minio instance along with volume$`, e2e.iDeleteMinioInstanceAlongWithVolume)
s.Step(`^verify minio application is deleted$`, e2e.verifyMinioApplicationIsDeleted)
s.Step(`^verify PV is deleted$`, e2e.verifyPVIsDeleted)
s.Step(`^minio application is launched successfully on volume$`, e2e.minioApplicationIsLaunchedSuccessfullyOnVolume)
}


@ -1,123 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
generateName: odm-operator-verify-
namespace: litmus
labels:
name: odm-operator-verify
test: deploy-minio
data:
config: |-
components:
- kind: serviceaccount
name: openebs-maya-operator
namespace: default
- kind: clusterrole
name: openebs-maya-operator
namespace: default
- kind: clusterrolebinding
name: openebs-maya-operator
namespace: default
- kind: pod
labels: name=maya-apiserver
namespace: default
- kind: service
name: maya-apiserver-service
namespace: default
- kind: pod
labels: name=openebs-provisioner
namespace: default
- kind: sc
name: openebs-standalone
---
apiVersion: v1
kind: ConfigMap
metadata:
name: odm-application-verify
namespace: litmus
labels:
name: odm-application-verify
test: deploy-minio
data:
config: |-
components:
- kind: service
name: odm-minio
- kind: deploy
name: odm-minio
- kind: pod
labels: app=minio
- kind: pvc
name: odm-minio
alias: pvc
---
apiVersion: v1
kind: ConfigMap
metadata:
name: odm-volume-verify
namespace: litmus
labels:
name: odm-volume-verify
test: deploy-minio
data:
config: |-
components:
- kind: pod
labels: openebs/controller=jiva-controller
- kind: pod
labels: openebs/replica=jiva-replica
---
apiVersion: batch/v1
kind: Job
metadata:
#name: odm-test-the-feature
name: litmus
namespace: litmus
labels:
#name: odm-test-the-feature
name: litmus
test: deploy-minio
spec:
template:
spec:
serviceAccountName: litmus
containers:
- name: odm-test-the-feature
image: openebs/godog-runner:ci
command: ["/bin/sh", "./hack/godog.sh", "./tests/minio/deploy_minio"]
volumeMounts:
- mountPath: /etc/e2e/operator-verify
name: odm-operator-verify
- mountPath: /etc/e2e/application-verify
name: odm-application-verify
- mountPath: /etc/e2e/volume-verify
name: odm-volume-verify
- mountPath: /etc/e2e/application-launch
name: odm-application-launch
volumes:
- name: odm-operator-verify
configMap:
name: odm-operator-verify
items:
- key: config
path: operator-verify.yaml
- name: odm-application-verify
configMap:
name: odm-application-verify
items:
- key: config
path: application-verify.yaml
- name: odm-volume-verify
configMap:
name: odm-volume-verify
items:
- key: config
path: volume-verify.yaml
- name: odm-application-launch
configMap:
name: odm-application-launch
items:
- key: config
path: application-launch.yaml
restartPolicy: Never


@ -1,28 +0,0 @@
## Checking MySQL data persistence upon forced reschedule (eviction)
### Objective
- This test checks MySQL data persistence with a specified storage solution after the application is subjected to
different types of failures, induced via "chaos" operations. Currently, the following chaos types are supported by the test job:
- APP_POD_KILL/PUMBA : The MySQL pod is terminated abruptly (via SIGKILL), multiple times, over a period of 120s using Pumba
- APP_POD_EVICT/KUBECTL : The MySQL and other pods on the application node are forcefully evicted by Kubernetes via resource taints
- APP_NODE_DRAIN/KUBECTL : The application node is taken down gracefully via cordon & drain process
### Considerations
- This test requires a multi-node Kubernetes cluster
*Note:* The minimum node count depends on the individual storage solution's HA policies. For example, OpenEBS needs a 3-node cluster
- This test simulates node loss, with original cluster state being reverted to at the end of test
- The application reschedule time is also impacted by the amount of delay between disk attach and mount attempts by Kubernetes
### Steps to Run
[Pre-Requisites](https://github.com/openebs/litmus#running-a-specific-test)
- View the following test info on the litmus node at /mnt/mysql_data_persistence:
- Pod logs at "Logstash_<timestamp>_.tar"
- Playbook run logs at "hosts/127.0.0.1"
- Result at "result.json"
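Once the pre-requisites are met, the test job can be launched like the other litmus tests by creating the Kubernetes job from its spec; assuming the job spec shown below is saved as `run_litmus_test.yml` (the repo's naming convention), e.g.:
```
kubectl create -f run_litmus_test.yml
```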


@ -1,73 +0,0 @@
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: percona
labels:
name: percona
spec:
replicas: 1
selector:
matchLabels:
name: percona
template:
metadata:
labels:
name: percona
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
name: litmus
topologyKey: kubernetes.io/hostname
containers:
- resources:
limits:
cpu: 0.5
name: percona
image: percona
imagePullPolicy: IfNotPresent
args:
- "--ignore-db-dir"
- "lost+found"
env:
- name: MYSQL_ROOT_PASSWORD
value: k8sDem0
ports:
- containerPort: 3306
name: percona
volumeMounts:
- mountPath: /var/lib/mysql
name: data-vol
volumes:
- name: data-vol
persistentVolumeClaim:
claimName: testClaim
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: testClaim
spec:
storageClassName: testClass
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5G
---
apiVersion: v1
kind: Service
metadata:
name: percona-mysql
labels:
name: percona-mysql
spec:
ports:
- port: 3306
targetPort: 3306
selector:
name: percona


@ -1,32 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
generateName: litmus-mysql-data-persistence-
namespace: litmus
spec:
template:
metadata:
labels:
name: litmus
spec:
serviceAccountName: litmus
restartPolicy: Never
containers:
- name: ansibletest
image: openebs/ansible-runner:ci
env:
- name: ANSIBLE_STDOUT_CALLBACK
#value: log_plays
value: default
- name: PROVIDER_STORAGE_CLASS
# Supported values: openebs-standard, local-storage
value: openebs-standard
- name: CHAOS_TYPE
# Supported values : APP_POD_EVICT/KUBECTL, APP_NODE_DRAIN/KUBECTL, APP_POD_KILL/PUMBA
value: "APP_POD_KILL/PUMBA"
command: ["/bin/bash"]
args: ["-c", "ansible-playbook ./percona/tests/mysql_data_persistence/test.yml -i /etc/ansible/hosts -v; exit 0"]


@ -1,148 +0,0 @@
# TODO
# Change pod status checks to container status checks (containerStatuses)
# O/P result
- hosts: localhost
connection: local
vars_files:
- test_vars.yml
tasks:
- block:
## VERIFY AVAILABILITY OF SELECTED TEST JOB PARAMS
- name: Check whether the provider storageclass is applied
shell: kubectl get sc {{ lookup('env','PROVIDER_STORAGE_CLASS') }}
args:
executable: /bin/bash
register: result
- name: Get the chaos type from test job
set_fact:
chaos_util: "{{ lookup('env','CHAOS_TYPE') }}"
- name: Verify if chaos type is supported by the test
fail:
msg: "Unsupported chaos type"
when: "chaos_util not in supported_chaos_types"
## PRE-CONDITION THE APPLICATION DEPLOYMENT SPECS WITH TEST PARAMS
- name: Replace the pvc placeholder with test param
replace:
path: "{{ pod_yaml_alias }}"
regexp: "testClaim"
replace: "{{ test_name }}"
- name: Replace the storageclass placeholder with provider
replace:
path: "{{ pod_yaml_alias }}"
regexp: "testClass"
replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
## RUN APPLICATION PERSISTENCE TEST
- name: Deploy percona mysql pod
shell: kubectl apply -f {{ pod_yaml_alias }} -n litmus
args:
executable: /bin/bash
- name: Confirm mysql pod status is running
shell: >
kubectl get pods -l name=percona -n litmus
--no-headers
args:
executable: /bin/bash
register: result
until: "'percona' and 'Running' in result.stdout"
delay: 60
retries: 15
- name: Obtain name of mysql pod
set_fact:
percona_pod_name: "{{ result.stdout.split()[0] }}"
- name: Check for successful database init
shell: >
kubectl logs {{ percona_pod_name }} -n litmus
| grep 'ready for connections' | wc -l
args:
executable: /bin/bash
register: result
until: result.stdout == "2"
delay: 10
retries: 30
- name: Create some test data in the mysql database
include_tasks: "/common/utils/mysql_data_persistence.yml"
vars:
status: 'LOAD'
ns: 'litmus'
pod_name: "{{ percona_pod_name }}"
dbuser: 'root'
dbpassword: 'k8sDem0'
dbname: 'tdb'
- include: /chaoslib/kubectl/pod_evict_by_taint.yaml
app: "{{ percona_pod_name }}"
app_ns: 'litmus'
taint: "node.kubernetes.io/out-of-disk"
when: chaos_util == "APP_POD_EVICT/KUBECTL"
- include: /chaoslib/kubectl/cordon_drain_node.yaml
app: "{{ percona_pod_name }}"
app_ns: 'litmus'
when: chaos_util == "APP_NODE_DRAIN/KUBECTL"
- include_tasks: /chaoslib/pumba/pod_failure_by_sigkill.yaml
vars:
action: "killapp"
app_pod: "{{ percona_pod_name }}"
namespace: 'litmus'
when: chaos_util == "APP_POD_KILL/PUMBA"
- name: Confirm mysql pod status is running
shell: >
kubectl get pods -l name=percona -n litmus
--no-headers
args:
executable: /bin/bash
register: result
until: "'percona' and 'Running' in result.stdout"
delay: 30
retries: 20
- name: Obtain name of mysql pod
set_fact:
percona_pod_name: "{{ result.stdout.split()[0] }}"
- name: Verify mysql data persistence
include_tasks: "/common/utils/mysql_data_persistence.yml"
vars:
status: 'VERIFY'
ns: 'litmus'
pod_name: "{{ percona_pod_name }}"
dbuser: 'root'
dbpassword: 'k8sDem0'
dbname: 'tdb'
- set_fact:
flag: "Pass"
rescue:
- set_fact:
flag: "Fail"
always:
- name: Create results file
lineinfile:
create: yes
state: present
path: '/var/log/ansible/result.json'
line: '{ "Testname" : {{ test_name | to_json }}, "status" : {{ flag | to_json }} }'
- include: test_cleanup.yml


@ -1,43 +0,0 @@
---
- name: Get pvc name to verify successful pvc deletion
shell: >
kubectl get pvc {{ test_name }}
-o custom-columns=:spec.volumeName -n litmus
--no-headers
args:
executable: /bin/bash
register: pv
- name: Delete percona mysql pod
shell: >
source ~/.profile; kubectl delete -f {{ pod_yaml_alias }}
-n litmus
args:
executable: /bin/bash
- name: Confirm percona pod has been deleted
shell: source ~/.profile; kubectl get pods -n litmus
args:
executable: /bin/bash
register: result
until: "'percona' not in result.stdout"
delay: 30
retries: 12
- block:
- name: Confirm pvc pod has been deleted
shell: >
kubectl get pods -n litmus | grep {{ pv.stdout }}
args:
executable: /bin/bash
register: result
failed_when: "'pvc' in result.stdout and 'Running' in result.stdout"
delay: 30
retries: 12
when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')"
- include_tasks: /chaoslib/pumba/pod_failure_by_sigkill.yaml
vars:
action: "deletepumba"
namespace: 'litmus'
when: chaos_util == "APP_POD_KILL/PUMBA"


@ -1,11 +0,0 @@
---
## TEST-SPECIFIC PARAMS
test_name: mysql-data-persistence
pod_yaml_alias: mysql.yml
supported_chaos_types:
- APP_POD_EVICT/KUBECTL
- APP_NODE_DRAIN/KUBECTL
- APP_POD_KILL/PUMBA


@ -1,54 +0,0 @@
This is a sample test to illustrate/templatize how scenarios can be coded using ansible in Litmus.
### Understanding the ansible-based litmus test
- Objective: Obtain a TPC-C benchmark against a MySQL (percona) database created on a specified Kubernetes storage solution
- Components: The ansible-based litmus test has the following pieces:
- The test code itself - constructed as playbooks (`test.yaml`). Most steps are simple kubectl commands.
- The auxiliary taskfiles invoked in the main playbook - such as `test_vars.yaml`, `test_cleanup.yaml`
Note: Tests may also include additional taskfiles such as `test_prerequisites.yaml`
- Deployment/statefulset specification used in the test scenario (`mysql.yaml`). By default, this consists of a placeholder
for the storageClass which will be replaced with the desired provider as part of test execution
- Test kubernetes job: The actual "test artifact" which is deployed to run the litmus test (`run_litmus_test.yaml`).
This job runs the `ansible-runner` container which executes the aforementioned test code with a `logger sidecar`.
Notes:
- By default, the OpenEBS storage class is passed as an ENV variable to the `ansible_runner` container. Replace it with the SC of the
desired storage provider. However, ensure the provider is already set up on the cluster
- Update the application node selector ENV variable to schedule the app on a node with the desired disk resources. In case of
local persistent volumes, ensure that the selected node also has the PV created.
- The test folder may also contain several `setup_*.yaml` config maps as necessary inputs to the test job
It is recommended that the naming conventions of the test playbooks, setup config maps & test kubernetes jobs be maintained
as described above in order to aid batch runs of all the litmus tests by the executor frameworks.
### Running the test
[Pre-Requisites](https://github.com/openebs/litmus#running-a-specific-test)
The test can be run using the following command:
`kubectl create -f run_litmus_test.yaml`
### Viewing test results & logs
The test is complete upon the Kubernetes job's completion, at the end of which the `ansible_runner` & `logger` pods are deleted.
Currently, the test results and logs are available in the `/mnt` folder of the node in which the job is scheduled. These include
the test result, pod logs, playbook logs & node's systemd (kubelet) logs if available.
### Considerations
All the litmus tests harness the enormous potential of `kubectl`, which we believe is more than just a CLI tool.


@ -1,74 +0,0 @@
##############################################################################
# This YAML runs a TPC-C workload against a percona database container #
# Obtain the TransactionsPerMinuteCount (TpmC) using the given command #
# #
# kubectl logs percona -c tpcc-bench | awk '/TpmC/ && !/^<TpmC>/{print $0}' #
# #
##############################################################################
---
apiVersion: v1
kind: Pod
metadata:
name: percona
labels:
name: percona
spec:
## Ensures the pod enters "completed" state after
## the tpcc-workload test duration
restartPolicy: Never
## Ensures the pod is scheduled on desired node
nodeSelector:
kubernetes.io/hostname: testNode
containers:
# Runs latest percona database container
- name: percona
image: openebs/tests-custom-percona:latest
args:
- "--ignore-db-dir"
- "lost+found"
env:
- name: MYSQL_ROOT_PASSWORD
value: k8sDem0
ports:
- containerPort: 3306
name: percona
volumeMounts:
- mountPath: /var/lib/mysql
name: mysql-path
# Runs a sample tpc-c workload sidecar container
# Waits 90s for MySQL to accept connections
- name: tpcc-bench
image: openebs/tests-tpcc-client
command: ["/bin/bash"]
args: ["-c", "./tpcc-runner.sh 127.0.0.1 tpcc.conf; exit 0"]
volumeMounts:
- name: tpcc-configmap
mountPath: /tpcc-mysql/tpcc.conf
subPath: tpcc.conf
tty: true
volumes:
- name: mysql-path
persistentVolumeClaim:
claimName: testClaim
- name: tpcc-configmap
configMap:
name: tpcc-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: testClaim
spec:
storageClassName: testClass
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5G


@ -1,31 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
generateName: litmus-mysql-storage-benchmark-
namespace: litmus
spec:
template:
metadata:
labels:
name: litmus
spec:
serviceAccountName: litmus
restartPolicy: Never
containers:
- name: ansibletest
image: openebs/ansible-runner:ci
env:
- name: ANSIBLE_STDOUT_CALLBACK
#value: log_plays
value: default
- name: PROVIDER_STORAGE_CLASS
value: openebs-standard
#value: local-storage
- name: APP_NODE_SELECTOR
value: kubeminion-01
command: ["/bin/bash"]
args: ["-c", "ansible-playbook ./percona/tests/mysql_storage_benchmark/test.yml -i /etc/ansible/hosts -v; exit 0"]


@ -1,131 +0,0 @@
# TODO
# Change pod status checks to container status checks (containerStatuses)
# O/P result
- hosts: localhost
connection: local
vars_files:
- test_vars.yml
tasks:
- block:
## VERIFY AVAILABILITY OF SELECTED STORAGE CLASS
- name: Check whether the provider storageclass is applied
shell: kubectl get sc {{ lookup('env','PROVIDER_STORAGE_CLASS') }}
args:
executable: /bin/bash
register: result
## PERFORM PROVIDER SPECIFIC CHECKS FOR PRE-REQUISITES
- block:
- name: OpenEBS - Check whether operator pods are running
shell: >
kubectl get pod -l name={{ item }} --no-headers
-o custom-columns=:status.phase --all-namespaces
args:
executable: /bin/bash
register: result
until: "result.stdout == 'Running'"
delay: 10
retries: 12
with_items: "{{ openebs_operator }}"
when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')"
- block:
- name: Local PV - Check whether the local volume is created
shell: >
kubectl get pv {{ local_pv_name }} --no-headers
-o custom-columns=:spec.local
args:
executable: /bin/bash
register: local_volume_path
when: "'local-storage' in lookup('env','PROVIDER_STORAGE_CLASS')"
## PRE-CONDITION THE APPLICATION DEPLOYMENT SPECS WITH TEST PARAMS
- name: Replace the app node placeholder with perf-intensive node
replace:
path: "{{ pod_yaml_alias }}"
regexp: "testNode"
replace: "{{ lookup('env','APP_NODE_SELECTOR') }}"
- name: Replace the pvc placeholder with test param
replace:
path: "{{ pod_yaml_alias }}"
regexp: "testClaim"
replace: "{{ test_name }}"
- name: Replace the storageclass placeholder with provider
replace:
path: "{{ pod_yaml_alias }}"
regexp: "testClass"
replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
## RUN APPLICATION WORKLOAD TEST
- name: Display TPCC parameters
shell: cat tpcc.conf
register: result
- name: Obtain TPCC run duration from config
set_fact:
mysql_load_duration: "{{ (result.stdout | from_json).run_duration }}"
- name: Create config map for TPCC properties
shell: >
kubectl create configmap tpcc-config --from-file=tpcc.conf
-n litmus
args:
executable: /bin/bash
- name: Deploy percona mysql pod
shell: kubectl apply -f {{ pod_yaml_alias }} -n litmus
args:
executable: /bin/bash
- name: Confirm pod status is running
shell: >
kubectl get pods -l name=percona -n litmus
--no-headers -o custom-columns=:status.phase
args:
executable: /bin/bash
register: result
until: "result.stdout == 'Running'"
delay: 120
retries: 15
- name: Wait for I/O completion
wait_for:
timeout: "{{ mysql_load_duration }}"
- name: Confirm pod status is completed
shell: >
kubectl get pods -l name=percona -n litmus
--no-headers
args:
executable: /bin/bash
register: result
until: "'Completed' in result.stdout"
delay: 120
retries: 15
- set_fact:
flag: "Pass"
rescue:
- set_fact:
flag: "Fail"
always:
- name: Create results file
lineinfile:
create: yes
state: present
path: '/var/log/ansible/result.json'
line: '{ "Testname" : {{ test_name | to_json }}, "status" : {{ flag | to_json }} }'
- include: test_cleanup.yaml


@ -1,51 +0,0 @@
---
- name: Get pvc name to verify successful pvc deletion
shell: >
kubectl get pvc {{ test_name }}
-o custom-columns=:spec.volumeName -n litmus
--no-headers
args:
executable: /bin/bash
register: pv
- name: Delete percona mysql pod
shell: >
source ~/.profile; kubectl delete -f {{ pod_yaml_alias }}
-n litmus
args:
executable: /bin/bash
- name: Delete the TPCC config map
shell: kubectl delete cm tpcc-config -n litmus
args:
executable: /bin/bash
- name: Confirm percona pod has been deleted
shell: source ~/.profile; kubectl get pods -n litmus
args:
executable: /bin/bash
register: result
until: "'percona' not in result.stdout"
delay: 30
retries: 12
- block:
- name: Confirm pvc pod has been deleted
shell: >
kubectl get pods -n litmus | grep {{ pv.stdout }}
args:
executable: /bin/bash
register: result
failed_when: "'pvc' in result.stdout and 'Running' in result.stdout"
delay: 30
retries: 12
when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')"
- block:
- name: Remove the local persistent volume
shell: kubectl delete pv {{ pv.stdout }}
args:
executable: /bin/bash
register: result
failed_when: "'persistentvolume' not in result.stdout or 'deleted' not in result.stdout"
when: "'local-storage' in lookup('env','PROVIDER_STORAGE_CLASS')"


@ -1,17 +0,0 @@
---
## TEST-SPECIFIC PARAMS
test_name: mysql-benchmark
pod_yaml_alias: mysql.yml
## PROVIDER-SPECIFIC PARAMS
# OpenEBS
openebs_operator:
- maya-apiserver
- openebs-provisioner
# Local Volume
local_pv_name: local-pv


@ -1,9 +0,0 @@
{
"db_user": "root",
"db_password": "k8sDem0",
"warehouses": "1",
"connections": "16",
"warmup_period": "10",
"run_duration": "120",
"interval": "2"
}


@ -1,15 +0,0 @@
# Litmus Deep Dive
Litmus tests range from initial setup and configuration validation to deploying and running workloads under various conditions and failures.
Litmus comprises the following major components:
- **Deployments** that help in setting up different types of Kubernetes clusters like on-premise, cloud, OpenShift, etc. By default, the deployment scripts provision and configure OpenEBS storage; however, these deployments are easily extended to support other storage.
- **Framework** for test execution that includes:
* Defining and running test suites
* Capturing logs and generating reports about the test runs
* Fault/Error injection tools that help to perform chaos tests
* Examples that demonstrate how to integrate these test pipelines with Slack notifications
- **Test modules** that can be triggered from within a Kubernetes cluster. Think of these as containerized tests. For instance, the **_mysql-client_** can be launched as a pod to validate the MySQL resiliency while the underlying nodes and the connected storage are subjected to chaos engineering.
- **Tests** that are themselves written in easy-to-understand formats, either in plain English (thanks, Godog!) or in Ansible playbooks. These tests primarily interact with the Kubernetes cluster via **_kubectl_**, making them highly portable.
Litmus can be used to test a given workload in a variety of Kubernetes environments, for example, a developer minikube or a GKE cluster with a specific storage solution or as a part of a full-fledged CI setup.


@ -1,24 +0,0 @@
# Running a Complete Test Suite
The Litmus test suite can be run on a Kubernetes cluster using an Ansible-based executor framework.
This involves:
- Setting up Ansible on any Linux machine (Ansible test harness), with SSH access to the Kubernetes cluster
- Generating the ansible inventory file with host information (master/control node & hosts)
- Modifying a global variables file to:
- Set the provider and storage class
- Select the test category (all or a subset)
- Enable/disable services such as log collection, notifications, etc.
Follow the executor/README for detailed instructions on how to perform the above steps. Once these pre-requisites
have been met, execute the following on the Ansible test harness:
```
./litmus/executor/ansible/run-litmus.sh
```
The above script verifies that it has all the details required to proceed and provides you with the
test task execution status.
*Litmus may take a while to show a reaction as it puts the system through rigorous scrutiny!*


@ -1,5 +0,0 @@
This folder contains the code that helps execute all or a subset of tests. There can be many
different ways of executing a series of tests; Ansible, for example, is one of them.
The executor performs batch execution of Litmus tests, with the ability to select/skip tests and
to consolidate results for dashboarding purposes.


@ -1,18 +0,0 @@
# Litmus Executor
The Litmus executor runs the litmusbooks sequentially, in the order defined in the all.csv file.
**In order to run the executor, perform the following steps:**
- Update the **all.csv** present in ```/executer/ansible/``` in this manner: **[test-type]:[test-path]:[litmus-job-label]**
**Example:**
```
deployers:/apps/percona/deployers/run_litmus_test.yml:percona-deployment-litmus
loadgen:/apps/percona/workload/run_litmus_test.yml:percona-loadgen-litmus
chaos:apps/percona/chaos/openebs_replica_network_delay:openebs-replica-network-delay-litmus
```
- Then simply run the bash file **execute.sh** present in ```/executer/ansible/```.
**Example:**
```bash execute.sh```
# NOTE
**The following things should be considered before running the executor:**
1. No spaces are allowed in the all.csv file.
2. The label for the litmus job should follow the pattern **[test-name]-litmus**.


@ -1,4 +0,0 @@
deployers:/apps/percona/deployers/run_litmus_test.yml:percona-deployment-litmus
loadgen:/apps/percona/workload/run_litmus_test.yml:percona-loadgen-litmus
chaos:apps/percona/chaos/openebs_replica_network_delay:openebs-replica-network-delay-litmus


@ -1,363 +0,0 @@
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first
[defaults]
# some basic default values...
inventory = ./inventory/hosts
callback_plugins = ./plugins/callback
stdout_callback = openebs
#library = /usr/share/my_modules/
#remote_tmp = ~/.ansible/tmp
#local_tmp = ~/.ansible/tmp
#forks = 5
#poll_interval = 15
#sudo_user = root
#ask_sudo_pass = True
#ask_pass = True
#transport = smart
#remote_port = 22
#module_lang = C
#module_set_locale = False
# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
#gathering = implicit
# by default retrieve all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
#gather_subset = all
# some hardware related facts are collected
# with a maximum timeout of 10 seconds. This
# option lets you increase or decrease that
# timeout to something more suitable for the
# environment.
# gather_timeout = 10
# additional paths to search for roles in, colon separated
roles_path = ./roles/
# uncomment this to disable SSH key host checking
#host_key_checking = False
# change the default callback
#stdout_callback = skippy
# enable additional callbacks
#callback_whitelist = timer, mail
# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
#task_includes_static = True
#handler_includes_static = True
# Controls if a missing handler for a notification event is an error or a warning
#error_on_missing_handler = True
# change this for alternative sudo implementations
#sudo_exe = sudo
# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n
# SSH timeout
#timeout = 10
# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root
# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = /var/log/ansible.log
# default module name for /usr/bin/ansible
#module_name = command
# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
#executable = /bin/sh
# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together? The default is 'replace' but
# this can also be set to 'merge'.
#hash_behaviour = replace
# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
#private_role_vars = yes
# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file
# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file
# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
# in some situations so the default is a static string:
#ansible_managed = Ansible managed
# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True
# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args. This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed. If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
#display_args_to_stdout = False
# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False
# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
#system_warnings = True
# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
#deprecation_warnings = True
# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead. These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string. This will for example suggest using the git module
# instead of shelling out to the git command.
# command_warnings = False
# set plugin path directories here, separate with colons
#action_plugins = /usr/share/ansible/plugins/action
#cache_plugins = /usr/share/ansible/plugins/cache
#callback_plugins = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins = /usr/share/ansible/plugins/lookup
#inventory_plugins = /usr/share/ansible/plugins/inventory
#vars_plugins = /usr/share/ansible/plugins/vars
#filter_plugins = /usr/share/ansible/plugins/filter
#test_plugins = /usr/share/ansible/plugins/test
#strategy_plugins = /usr/share/ansible/plugins/strategy
# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
#bin_ansible_callbacks = False
# don't like cows? that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1
# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random
# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
# in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
#nocolor = 1
# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored. This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
#fact_caching = memory
# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path
retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry
# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
#squash_actions = apk,apt,dnf,homebrew,package,pacman,pkgng,yum,zypper
# prevents logging of task data, off by default
#no_log = False
# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
#no_target_syslog = False
# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine. This option is False by default for security. Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
#allow_world_readable_tmpfiles = False
# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9
# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system. The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'
# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576
[privilege_escalation]
#become=True
#become_method=sudo
#become_user=root
#become_ask_pass=False
[paramiko_connection]
# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered. Increases performance on new host additions. Setting works independently of the
# host key checking setting above.
#record_host_keys=False
# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False
[ssh_connection]
# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it, -C controls compression use
#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
# The path to use for the ControlPath sockets. This defaults to
# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
# very long hostnames or very long path names (caused by long user names or
# deeply nested home directories) this can exceed the character limit on
# file socket names (108 characters for most platforms). In that case, you
# may wish to shorten the string below.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
#pipelining = False
# Control the mechanism for transferring files
# * smart = try sftp and then try scp [default]
# * True = use scp only
# * False = use sftp only
#scp_if_ssh = smart
# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False
[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0
# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30
# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes
[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs
# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes
[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan


@ -1,23 +0,0 @@
#!/bin/bash
# Executes every litmusbook listed in all.csv and summarizes the pass count.
file=./all.csv
var=0;
update_file()
{
	# Each all.csv row has the form [test-type]:[test-path]:[litmus-job-label]
	while IFS=':' read -r col1 col2 col3; do
		echo "$col2:$col3"
		# Derive the test name by stripping the trailing "-litmus" from the label
		test="${col3%-*}"
		ansible-playbook ./utils/execute.yml -e "LABEL=$col3 PATH=$col2 TYPE=$col1 TESTNAME=$test" -vv
	done < $file
}
if [ ! -e $file ]; then
	echo "Unable to find CSV file"
else
	touch result.csv
	update_file
	# Count the tests that recorded a Pass result
	var="$(cat result.csv | grep Pass | wc -l)"
	echo "Number of tests passed: $var" >> result.csv
	cat result.csv
	rm result.csv
fi


@ -1,21 +0,0 @@
---
#########################################
# Ansible Runtime Specifications #
#########################################
#Provider storage class
#Supported providers(openebs, local-pv)
storage_class: openebs-standard
#Option to enable slack notifications to specified channel
#Accepted entries(true, false): default:true
slack_notify: true
#Select the desired application
#Supported applications(percona, tba, all)
application:
- percona
- fio
litmus_dir: "{{ ansible_env.HOME }}/git/litmus"


@ -1,3 +0,0 @@
---
ansible_connection: local
ansible_become_pass: "{{ lookup('env','LOCAL_USER_PASSWORD') }}"


@ -1,2 +0,0 @@
localhost ansible_connection=local
ansible_become_pass="{{ lookup('env','LOCAL_USER_PASSWORD') }}"


@ -1,24 +0,0 @@
---
- hosts: localhost
gather_facts: yes
vars:
testfile: "{{ playbook_dir }}/tests.out"
tasks:
- name: Clear existing test lists
file:
path: "{{ testfile }}"
state: absent
- name: Obtain list of Kubernetes test job specifications
include: utils/getFiles.yaml
dir="{{ litmus_dir }}/apps/{{ item }}/tests"
expr="^run_litmus"
tfile="{{ testfile }}"
with_items: "{{ application }}"
- name: Run the Kubernetes test jobs on selected storage providers
include: utils/runTest.yaml
with_lines: cat {{ testfile }}

View File

@ -1,6 +0,0 @@
# Ansible Plugins
-----------------
Contains custom plugins that can be integrated into Ansible. Currently holds a callback plugin that has been
modified to hide/suppress certain messages during failed retries in a loop execution.
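
For reference, a custom stdout callback of this kind is typically enabled through ansible.cfg; the plugin directory below is illustrative and assumes the plugin file sits alongside the playbooks:

    [defaults]
    callback_plugins = ./plugins/callback
    stdout_callback = openebs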

View File

@ -1,94 +0,0 @@
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you don't care about OK or Skipped results.
- It suppresses any status that is not Failed or Changed.
version_added: "2.1"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'actionable'
def __init__(self):
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
self.last_task = None
self.shown_title = False
def v2_playbook_on_handler_task_start(self, task):
self.super_ref.v2_playbook_on_handler_task_start(task)
self.shown_title = True
def v2_playbook_on_task_start(self, task, is_conditional):
self.last_task = task
self.shown_title = False
def display_task_banner(self):
if not self.shown_title:
self.super_ref.v2_playbook_on_task_start(self.last_task, None)
self.shown_title = True
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display_task_banner()
self.super_ref.v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
#self.super_ref.v2_runner_on_ok(result)
def v2_runner_on_unreachable(self, result):
self.display_task_banner()
self.super_ref.v2_runner_on_unreachable(result)
def v2_runner_on_skipped(self, result):
pass
def v2_playbook_on_include(self, included_file):
pass
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
#self.super_ref.v2_runner_item_on_ok(result)
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_item_on_failed(self, result):
self.display_task_banner()
self.super_ref.v2_runner_item_on_failed(result)
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
final_result = result._result['retries'] - result._result['attempts']
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name,
final_result)
display_verbosity = self._display.verbosity
required_result = '_ansible_verbose_always'
if (display_verbosity > 2 or required_result in result._result):
if required_result not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.v('%s' % (msg))

View File

@ -1,372 +0,0 @@
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of ARA: Ansible Run Analysis.
#
# ARA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ARA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ARA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import flask
import itertools
import logging
import os
from ansible import __version__ as ansible_version
from ansible.plugins.callback import CallbackBase
from ara import models
from ara.models import db
from ara.webapp import create_app
from datetime import datetime
from distutils.version import LooseVersion
from oslo_serialization import jsonutils
# To retrieve Ansible CLI options
try:
from __main__ import cli
except ImportError:
cli = None
LOG = logging.getLogger('ara.callback')
app = create_app()
class IncludeResult(object):
"""
This is used by the v2_playbook_on_include callback to synthesize a task
result for calling log_task.
"""
def __init__(self, host, path):
self._host = host
self._result = {'included_file': path}
class CallbackModule(CallbackBase):
"""
Saves data from an Ansible run into a database
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'ara'
def __init__(self):
super(CallbackModule, self).__init__()
if not flask.current_app:
ctx = app.app_context()
ctx.push()
self.taskresult = None
self.task = None
self.play = None
self.playbook = None
self.stats = None
self.loop_items = []
self.play_counter = itertools.count()
self.task_counter = itertools.count()
if cli:
self._options = cli.options
else:
self._options = None
def get_or_create_host(self, hostname):
try:
host = (models.Host.query
.filter_by(name=hostname)
.filter_by(playbook_id=self.playbook.id)
.one())
except models.NoResultFound:
host = models.Host(name=hostname, playbook=self.playbook)
db.session.add(host)
db.session.commit()
return host
def get_or_create_file(self, path):
try:
if self.playbook.id:
file_ = (models.File.query
.filter_by(path=path)
.filter_by(playbook_id=self.playbook.id)
.one())
return file_
except models.NoResultFound:
pass
file_ = models.File(path=path, playbook=self.playbook)
db.session.add(file_)
db.session.commit()
try:
with open(path, 'r') as fd:
data = fd.read()
sha1 = models.content_sha1(data)
content = models.FileContent.query.get(sha1)
if content is None:
content = models.FileContent(content=data)
file_.content = content
except IOError:
LOG.warning('failed to open %s for reading', path)
return file_
def log_task(self, result, status, **kwargs):
"""
'log_task' is called when an individual task instance on a single
host completes. It is responsible for logging a single
'TaskResult' record to the database.
"""
LOG.debug('logging task result for task %s (%s), host %s',
self.task.name, self.task.id, result._host.get_name())
# An include_role task might end up putting an IncludeRole object
# inside the result object which we don't need
# https://github.com/ansible/ansible/issues/30385
if 'include_role' in result._result:
del result._result['include_role']
result.task_start = self.task.time_start
result.task_end = datetime.now()
host = self.get_or_create_host(result._host.get_name())
# Use Ansible's CallbackBase._dump_results in order to strip internal
# keys, respect no_log directive, etc.
if self.loop_items:
# NOTE (dmsimard): There is a known issue in which Ansible can send
# callback hooks out of order and "exit" the task before all items
# have returned, this can cause one of the items to be missing
# from the task result in ARA.
# https://github.com/ansible/ansible/issues/24207
results = [self._dump_results(result._result)]
for item in self.loop_items:
results.append(self._dump_results(item._result))
results = jsonutils.loads(jsonutils.dumps(results))
else:
results = jsonutils.loads(self._dump_results(result._result))
# Ignore errors can be "yes" instead of a proper boolean in <2.3
# for some reason
ignore_errors = kwargs.get('ignore_errors', False)
if LooseVersion(ansible_version) < LooseVersion('2.3.0'):
if not isinstance(ignore_errors, bool):
ignore_errors = True if ignore_errors == "yes" else False
self.taskresult = models.TaskResult(
task=self.task,
host=host,
time_start=result.task_start,
time_end=result.task_end,
result=jsonutils.dumps(results),
status=status,
changed=result._result.get('changed', False),
failed=result._result.get('failed', False),
skipped=result._result.get('skipped', False),
unreachable=result._result.get('unreachable', False),
ignore_errors=ignore_errors,
)
db.session.add(self.taskresult)
db.session.commit()
if self.task.action == 'setup' and 'ansible_facts' in result._result:
values = jsonutils.dumps(result._result['ansible_facts'])
facts = models.HostFacts(values=values)
host.facts = facts
db.session.add(facts)
db.session.commit()
def log_stats(self, stats):
"""
Logs playbook statistics to the database.
"""
LOG.debug('logging stats')
hosts = sorted(stats.processed.keys())
for hostname in hosts:
host = self.get_or_create_host(hostname)
host_stats = stats.summarize(hostname)
db.session.add(models.Stats(
playbook=self.playbook,
host=host,
changed=host_stats['changed'],
unreachable=host_stats['unreachable'],
failed=host_stats['failures'],
ok=host_stats['ok'],
skipped=host_stats['skipped']
))
db.session.commit()
def close_task(self):
"""
Marks the completion time of the currently active task.
"""
if self.task is not None:
LOG.debug('closing task %s (%s)',
self.task.name,
self.task.id)
self.task.stop()
db.session.add(self.task)
db.session.commit()
self.task = None
self.loop_items = []
def close_play(self):
"""
Marks the completion time of the currently active play.
"""
if self.play is not None:
LOG.debug('closing play %s (%s)', self.play.name, self.play.id)
self.play.stop()
db.session.add(self.play)
db.session.commit()
self.play = None
def close_playbook(self):
"""
Marks the completion time of the currently active playbook.
"""
if self.playbook is not None:
LOG.debug('closing playbook %s', self.playbook.path)
self.playbook.stop()
self.playbook.complete = True
db.session.add(self.playbook)
db.session.commit()
def v2_runner_item_on_ok(self, result):
self.loop_items.append(result)
def v2_runner_item_on_failed(self, result):
self.loop_items.append(result)
def v2_runner_item_on_skipped(self, result):
self.loop_items.append(result)
def v2_runner_retry(self, result):
self.loop_items.append(result)
def v2_runner_on_ok(self, result, **kwargs):
self.log_task(result, 'ok', **kwargs)
def v2_runner_on_unreachable(self, result, **kwargs):
self.log_task(result, 'unreachable', **kwargs)
def v2_runner_on_failed(self, result, **kwargs):
self.log_task(result, 'failed', **kwargs)
def v2_runner_on_skipped(self, result, **kwargs):
self.log_task(result, 'skipped', **kwargs)
def v2_playbook_on_task_start(self, task, is_conditional,
is_handler=False):
self.close_task()
LOG.debug('starting task %s (action %s)',
task.name, task.action)
pathspec = task.get_path()
if pathspec:
path, lineno = pathspec.split(':', 1)
lineno = int(lineno)
file_ = self.get_or_create_file(path)
else:
path = self.playbook.path
lineno = 1
file_ = self.get_or_create_file(self.playbook.path)
self.task = models.Task(
name=task.get_name(),
sortkey=next(self.task_counter),
action=task.action,
play=self.play,
playbook=self.playbook,
tags=jsonutils.dumps(task._attributes['tags']),
file=file_,
lineno=lineno,
is_handler=is_handler)
self.task.start()
db.session.add(self.task)
db.session.commit()
def v2_playbook_on_handler_task_start(self, task):
self.v2_playbook_on_task_start(task, False, is_handler=True)
def v2_playbook_on_start(self, playbook):
path = os.path.abspath(playbook._file_name)
if self._options is not None:
options = self._options.__dict__.copy()
else:
options = {}
# Potentially sanitize some user-specified keys
for parameter in app.config['ARA_IGNORE_PARAMETERS']:
if parameter in options:
msg = "Parameter not saved by ARA due to configuration"
options[parameter] = msg
LOG.debug('starting playbook %s', path)
self.playbook = models.Playbook(
ansible_version=ansible_version,
path=path,
options=options
)
self.playbook.start()
db.session.add(self.playbook)
db.session.commit()
file_ = self.get_or_create_file(path)
file_.is_playbook = True
# We need to persist the playbook id so it can be used by the modules
data = {
'playbook': {
'id': self.playbook.id
}
}
tmpfile = os.path.join(app.config['ARA_TMP_DIR'], 'ara.json')
with open(tmpfile, 'w') as file:
file.write(jsonutils.dumps(data))
def v2_playbook_on_play_start(self, play):
self.close_task()
self.close_play()
LOG.debug('starting play %s', play.name)
if self.play is not None:
self.play.stop()
self.play = models.Play(
name=play.name,
sortkey=next(self.play_counter),
playbook=self.playbook
)
self.play.start()
db.session.add(self.play)
db.session.commit()
def v2_playbook_on_stats(self, stats):
self.log_stats(stats)
self.close_task()
self.close_play()
self.close_playbook()
LOG.debug('closing database')
db.session.close()

View File

@ -1,63 +0,0 @@
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import (
CallbackModule as CallbackModule_default
)
from ansible import constants as C
"""Implementation of Custom Class that inherits the 'default' stdout_callback
plugin and overrides the v2_runner_retry api for displaying the 'FAILED -
RETRYING' only during verbose mode."""
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'openebs'
CALLBACK_NEEDS_WHITELIST = False
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
final_result = result._result['retries'] - result._result['attempts']
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name,
final_result)
display_verbosity = self._display.verbosity
required_result = '_ansible_verbose_always'
if (display_verbosity > 2 or required_result in result._result):
if required_result not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.v('%s' % (msg))
def v2_runner_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
dumped_results = self._dump_results(my_result)
msg = "skipping: [%s] => %s" % (result._host.get_name(),
dumped_results)
self._display.display(msg, color=C.COLOR_SKIP)
else:
self._display.display("skipping task..", color=C.COLOR_SKIP)
def v2_runner_item_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
required_item = self._get_item(my_result)
dumped_result = self._dump_results(my_result)
result_host = result._host.get_name()
msg = "skipping: [%s] => (item=%s) => %s" % (result_host,
required_item,
dumped_result)
self._display.display(msg, color=C.COLOR_SKIP)

View File

@ -1,127 +0,0 @@
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.plugins.callback import CallbackBase
import datetime
# define start time
t0 = tn = datetime.datetime.utcnow()
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp():
global tn
time_current = datetime.datetime.utcnow()
time_elapsed = (time_current - tn).total_seconds()
time_total_elapsed = (time_current - t0).total_seconds()
print( filled( '%s (delta: %s) %s elapsed: %s' % (time_current.isoformat(),
time_elapsed, ' ' * 7, time_total_elapsed )))
tn = datetime.datetime.utcnow()
class CallbackModule(CallbackBase):
"""
this is an example ansible callback file that does nothing. You can drop
other classes in the same directory to define your own handlers. Methods
you do not use can be omitted.
example uses include: logging, emailing, storing info, etc
"""
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_error(self, host, msg):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
timestamp()
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
timestamp()
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, pattern):
timestamp()
self._display.display(filled("", fchar="="))
pass
def playbook_on_stats(self, stats):
timestamp()
self._display.display(filled("", fchar="="))
pass

View File

@ -1,10 +0,0 @@
The provider directory contains playbooks to set up storage providers on the Kubernetes cluster. This may involve installing operators,
static provisioning of disk resources (say, a Kubernetes Local Persistent Volume), or cloud storage (Google persistent disks).
A playbook may be self-contained, i.e., consist of all the steps required to set up the storage provider, or it can invoke a role
created for that purpose.

View File

@ -1,11 +0,0 @@
### Setting up local disk resources
- Manually discover, format, and mount the disk on the desired node
- Update templates/pv.yaml with appropriate node and disk mount location
#### Note:
- The local PV is beta in Kubernetes 1.10.
- The standard persistentVolumeReclaimPolicy is "Retain"; "Delete" is not yet supported in all types of clusters.
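
As an illustrative sketch of the manual preparation (the device name is hypothetical; substitute the disk discovered on your node; the mount path matches templates/pv.yaml):

    lsblk                                 # discover the unused disk
    sudo mkfs.ext4 /dev/sdb               # format it (hypothetical device)
    sudo mkdir -p /mnt/disks/vol1         # mount point referenced by templates/pv.yaml
    sudo mount /dev/sdb /mnt/disks/vol1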

View File

@ -1,8 +0,0 @@
---
- hosts: kubernetes-kubemasters
roles:
- role: k8s-local-pv
template_path: "{{ playbook_dir }}/templates"
local_storage_class: storage_class.yaml
local_pv: pv.yaml

View File

@ -1,22 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: local-pv
spec:
capacity:
storage: 5G
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/disks/vol1
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- kubeminion-01

View File

@ -1,7 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

View File

@ -1,3 +0,0 @@
---
local_storageclass_yaml: local-storage-sc.yaml

View File

@ -1,51 +0,0 @@
---
- name: Get kubernetes master name
shell: hostname
args:
executable: /bin/bash
register: result
- name: Get kubernetes master status
shell: source ~/.profile; kubectl get nodes | grep {{ result.stdout.lower() }} | awk '{print $2}'
args:
executable: /bin/bash
register: result
until: result.stdout == 'Ready'
delay: 60
retries: 10
ignore_errors: true
- name: Report that the K8S master is not ready
debug:
msg: "Ending play, K8S Master NOT READY"
when: result.stdout != "Ready"
- name: Ending Playbook Run - K8S master is NOT READY
meta: end_play
when: result.stdout != "Ready"
- name: Copy local-storage artifacts to kube-master
copy:
src: "{{ item }}"
dest: "{{ ansible_env.HOME }}"
with_items:
- "{{ template_path }}/{{ local_storage_class }}"
- "{{ template_path }}/{{ local_pv }}"
- name: Deploy the local-pv storageclass
shell: >
source ~/.profile;
kubectl apply -f {{ ansible_env.HOME }}/{{ local_storage_class }}
args:
executable: /bin/bash
register: result
failed_when: "'storageclass' and ('created' or 'configured') not in result.stdout"
- name: Create the local persistent volume
shell: >
source ~/.profile;
kubectl apply -f {{ ansible_env.HOME }}/{{ local_pv }}
args:
executable: /bin/bash
register: result
failed_when: "'persistentvolume' and ('created' or 'configured') not in result.stdout"

View File

@ -1,47 +0,0 @@
---
- hosts: localhost
connection: local
tasks:
- block:
- name: Running the defined application
shell: kubectl create -f {{ PATH }}
- name: Getting status of JOB
shell: kubectl get pod -n litmus -l app={{ LABEL }} -o jsonpath='{.items[0].status.phase}'
register: status
until: "'Succeeded' in status.stdout"
delay: 30
retries: 30
- name: Checking the status of Ansible-test container
shell: kubectl get pod -n litmus -l app={{ LABEL }} -o jsonpath='{.items[0].status.containerStatuses[0].ready}'
register: container_status
until: "'false' in container_status.stdout"
- name: Getting result CR
shell: kubectl get lr {{ TESTNAME }} -o jsonpath='{.spec.testStatus.result}'
register: result
until: "'none' not in result.stdout"
delay: 20
retries: 10
- name: Updating the test-result csv
lineinfile:
path: /executor/ansible/result.csv
state: present
line: '{{ TYPE }} : {{ TESTNAME }} : {{ result.stdout }}'
rescue:
- set_fact:
flag: "Fail"
- name: Updating the test-result csv
lineinfile:
path: /executor/ansible/result.csv
state: present
line: '{{ TYPE }} : {{ TESTNAME }} : {{ flag }}'

View File

@ -1,21 +0,0 @@
---
- name: Obtain list of Kubernetes test job specifications
find:
paths: "{{ dir }}"
patterns: "{{ expr }}"
recurse: yes
use_regex: yes
register: result
- debug:
msg: "{{ item.path }}"
with_items: "{{ result.files }}"
- name: Create test path list
lineinfile:
path: "{{ tfile }}"
line: "{{ item.path }}"
state: present
create: yes
with_items: "{{ result.files }}"

View File

@ -1,44 +0,0 @@
---
- name: Get the Test item from Test Job List
set_fact:
test: "{{ item }}"
- block:
- name: Replace the storage class based on provider
replace:
path: "{{ test }}"
regexp: "openebs-standard"
replace: "{{ storage_class }}"
- name: Run the test Kubernetes job YAML
shell: source ~/.profile; kubectl create -f run_litmus_test.yaml
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
- name: Verify the test Kubernetes job is successful
# This is a placeholder task that waits up to 30m for the job to complete
shell: >
source ~/.profile;
kubectl get job litmus --no-headers
-n litmus -o custom-columns=:status.succeeded
args:
executable: /bin/bash
register: result
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
until: "result.stdout|int == 1"
delay: 120
retries: 15
## TODO: Result CR parse, slack notify if applicable
rescue:
- name: Handle job failure
debug:
msg: "Unable to complete test, please examine the job spec for {{ test }}"
always:
- name: Message between test job runs
debug:
msg: "Moving to next test..."

View File

@ -1,10 +0,0 @@
#!/usr/bin/env sh
set -o errexit
set -o nounset
CURDIR=`pwd`
cd "$1" && godog --stop-on-failure e2e.feature
cd ${CURDIR}

View File

@ -1,68 +0,0 @@
#!/usr/bin/env sh
set -o errexit
set -o nounset
CURDIR=`pwd`
#Install latest minikube
ls /usr/local/bin/minikube || \
(curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
&& chmod +x minikube \
&& sudo mv minikube /usr/local/bin/)
#Install latest kubectl
ls /usr/local/bin/kubectl || \
(curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
&& chmod +x kubectl \
&& sudo mv kubectl /usr/local/bin/)
#Setup minikube
mkdir -p $HOME/.minikube
mkdir -p $HOME/.kube
touch $HOME/.kube/config
# Push these ENV k:v to $HOME/.profile to start/restart minikube.
grep "MINIKUBE_WANTUPDATENOTIFICATION=false" $HOME/.profile || \
echo "MINIKUBE_WANTUPDATENOTIFICATION=false" >> $HOME/.profile
grep "MINIKUBE_WANTREPORTERRORPROMPT=false" $HOME/.profile || \
echo "MINIKUBE_WANTREPORTERRORPROMPT=false" >> $HOME/.profile
grep "MINIKUBE_HOME=$HOME" $HOME/.profile || \
echo "MINIKUBE_HOME=$HOME" >> $HOME/.profile
grep "CHANGE_MINIKUBE_NONE_USER=true" $HOME/.profile || \
echo "CHANGE_MINIKUBE_NONE_USER=true" >> $HOME/.profile
grep "KUBECONFIG=$HOME/.kube/config" $HOME/.profile || \
echo "KUBECONFIG=$HOME/.kube/config" >> $HOME/.profile
# Export above as well for `minikube start` to work
# in the same session of `vagrant up`
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
export KUBECONFIG=$HOME/.kube/config
# Permissions
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
# Start minikube on this host itself with RBAC enabled
sudo -E minikube start --vm-driver=none --extra-config=apiserver.Authorization.Mode=RBAC
# Wait for Kubernetes to be up and ready.
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done
echo ""
echo "================================================"
echo "Congrats!! minikube apiserver is running"
echo "================================================"
echo ""
cd ${CURDIR}

View File

@ -1,64 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockgen -package exec -source=exec.go -destination exec_mock.go
package exec
import (
"bytes"
"fmt"
osexec "os/exec"
"strings"
)
// Executor acts as a contract for various execution based logic
type Executor interface {
Output(args []string) (output string, err error)
}
// ShellExec is a shell based struct that implements Executor interface
type ShellExec struct {
binary string
}
// NewShellExec returns a new instance of shellExec
// based on the provided binary name
func NewShellExec(binary string) *ShellExec {
return &ShellExec{
binary: binary,
}
}
// Output executes the shell command and returns the output or error
func (e *ShellExec) Output(args []string) (output string, err error) {
var out bytes.Buffer
var stderr bytes.Buffer
cmd := osexec.Command(e.binary, args...)
cmd.Stdout = &out
cmd.Stderr = &stderr
err = cmd.Run()
if err != nil {
err = fmt.Errorf("failed to run cmd '%s': %s: %s", cmd.Args, fmt.Sprint(err), stderr.String())
return
}
// This removes the beginning & trailing single quotes from the output
// It has been observed that kubectl execution results in such single quotes
output = strings.Trim(out.String(), "'")
return
}
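
A minimal usage sketch for this executor (the binary and arguments are illustrative; any executable on the PATH works the same way):

package main

import (
	"fmt"

	"github.com/openebs/litmus/pkg/exec"
)

func main() {
	// run `kubectl version --client` through the shell-based executor
	e := exec.NewShellExec("kubectl")
	out, err := e.Output([]string{"version", "--client"})
	if err != nil {
		fmt.Println("execution failed:", err)
		return
	}
	fmt.Println(out)
}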

View File

@ -1,46 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: exec.go
// Package exec is a generated GoMock package.
package exec
import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockExecutor is a mock of Executor interface
type MockExecutor struct {
ctrl *gomock.Controller
recorder *MockExecutorMockRecorder
}
// MockExecutorMockRecorder is the mock recorder for MockExecutor
type MockExecutorMockRecorder struct {
mock *MockExecutor
}
// NewMockExecutor creates a new mock instance
func NewMockExecutor(ctrl *gomock.Controller) *MockExecutor {
mock := &MockExecutor{ctrl: ctrl}
mock.recorder = &MockExecutorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockExecutor) EXPECT() *MockExecutorMockRecorder {
return m.recorder
}
// Output mocks base method
func (m *MockExecutor) Output(args []string) (string, error) {
ret := m.ctrl.Call(m, "Output", args)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Output indicates an expected call of Output
func (mr *MockExecutorMockRecorder) Output(args interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Output", reflect.TypeOf((*MockExecutor)(nil).Output), args)
}

View File

@ -1,45 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exec
import (
"testing"
)
func TestNewShellExec(t *testing.T) {
testCases := map[string]struct {
binary string
expectValue *ShellExec
}{
"String Empty": {
binary: "",
expectValue: &ShellExec{binary: ""},
},
"Present string value": {
binary: "_MY_PRESENT_STRING_",
expectValue: &ShellExec{binary: "_MY_PRESENT_STRING_"},
},
}
for k, v := range testCases {
t.Run(k, func(t *testing.T) {
got := NewShellExec(v.binary)
if got.binary != v.expectValue.binary {
t.Errorf("expected %s got %s", v.expectValue.binary, got.binary)
}
})
}
}

View File

@ -1,344 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockgen -package kubectl -source=kubectl.go -destination kubectl_mock.go
package kubectl
import (
"fmt"
"strings"
"github.com/openebs/litmus/pkg/exec"
"github.com/openebs/litmus/pkg/util"
)
// KubectlFile is the type to hold various yaml file paths
// that can be applied by kubectl
type KubectlFile string
const (
// KubectlPath is the expected location where kubectl executable may be found
KubectlPath = "/usr/local/bin/kubectl"
)
const (
// DefaultLitmusNamespace is the default namespace where kubectl operations
// will be executed
DefaultLitmusNamespace = "litmus"
)
// kubectlArgs builds the arguments required to execute any kubectl command
//
// This has been borrowed from https://github.com/CanopyTax/ckube
func kubectlArgs(args []string, namespace string, context string, labels string) []string {
if len(namespace) != 0 {
args = append(args, fmt.Sprintf("--namespace=%v", strings.TrimSpace(namespace)))
}
if len(context) != 0 {
args = append(args, fmt.Sprintf("--context=%v", strings.TrimSpace(context)))
}
if len(labels) != 0 {
args = append(args, fmt.Sprintf("--selector=%v", strings.TrimSpace(labels)))
}
return args
}
// KubeRunner interface provides the contract i.e. method signature to
// invoke commands at kubernetes cluster
type KubeRunner interface {
// Run executes the kubectl command
Run(args []string) (output string, err error)
}
// Kubectl holds the properties required to execute any kubectl command.
// Kubectl is an implementation of following interfaces:
// 1. KubeRunner
type Kubectl struct {
// namespace where this kubectl command will be run
namespace string
// labels to be used during kubectl execution
labels string
// context where this kubectl command will be run
context string
// args are provided to kubectl command during its run
args []string
// executor does actual kubectl execution
executor exec.Executor
}
// GetKubectlPath gets the location where kubectl executable is
// expected to be present
func GetKubectlPath() string {
// get from environment variable
kpath := util.KubectlPathENV()
if len(kpath) == 0 {
// else use the constant
kpath = KubectlPath
}
return kpath
}
// New returns a new instance of kubectl based on defaults
func New() *Kubectl {
return &Kubectl{
namespace: DefaultLitmusNamespace,
executor: exec.NewShellExec(GetKubectlPath()),
}
}
// Namespace sets the namespace to be used during kubectl run
func (k *Kubectl) Namespace(namespace string) *Kubectl {
if len(namespace) == 0 {
return k
}
k.namespace = namespace
return k
}
// Labels sets the labels to be used during kubectl run
func (k *Kubectl) Labels(labels string) *Kubectl {
k.labels = labels
return k
}
// Context sets the context to be used during kubectl run
func (k *Kubectl) Context(context string) *Kubectl {
k.context = context
return k
}
// Args sets the args to be used during kubectl run
func (k *Kubectl) Args(args []string) *Kubectl {
k.args = args
return k
}
// Run will execute the kubectl command & provide output or error
func (k *Kubectl) Run(args []string) (output string, err error) {
k.args = kubectlArgs(args, k.namespace, k.context, k.labels)
output, err = k.executor.Output(k.args)
return
}
// IsPod flags if the provided kind is a kubernetes pod or is related
// to a pod
func IsPod(kind string) (yes bool) {
switch kind {
case "po", "pod", "pods", "deploy", "deployment", "deployments", "job", "jobs", "sts", "statefulset", "statefulsets", "ds", "daemonset", "daemonsets":
yes = true
default:
yes = false
}
return
}
// ArePodsRunning returns true if all the pod(s) are running, false otherwise
//
// An example of kubectl get pods & its state:
// This makes use of status.containerStatuses[*].state.*.reason
//
// $ kubectl get po -n kube-system --selector=k8s-app=kube-dns -o jsonpath='{.items[*].status.containerStatuses[*].state.*.reason}'
// CrashLoopBackOff CrashLoopBackOff
//
// Another example of kubectl get pods & its state:
// This makes use of status.containerStatuses[*].ready
//
// $ kubectl get pods -n kube-system --selector=k8s-app=kube-dns -o jsonpath='{.items[*].status.containerStatuses[*].ready}'
// true false true
func ArePodsRunning(k KubeRunner) (yes bool, err error) {
isReady, err := k.Run([]string{"get", "pods", "-o", "jsonpath='{.items[*].status.containerStatuses[*].ready}'"})
if err != nil {
return
}
// split the output by space
isReadyArr := strings.Split(isReady, " ")
if contains(isReadyArr, "false") {
err = fmt.Errorf("pod(s) are not running: '%#v'", isReadyArr)
return
}
// double check
if contains(isReadyArr, "true") {
yes = true
} else {
err = fmt.Errorf("status of pod(s) could not be determined: '%#v'", isReadyArr)
}
return
}
// IsPodRunning returns true if the specified pod is running, false otherwise
//
// An example of kubectl get pods & its state:
// This makes use of status.containerStatuses[*].state.*.reason
//
// $ kubectl get po -n kube-system my-pod -o jsonpath='{.status.containerStatuses[*].state.*.reason}'
// CrashLoopBackOff CrashLoopBackOff
//
// Another example of kubectl get pods & its state:
// This makes use of status.containerStatuses[*].ready
//
// $ kubectl get pods -n kube-system my-pod -o jsonpath='{.status.containerStatuses[*].ready}'
// true false true
func IsPodRunning(k KubeRunner, name string) (yes bool, err error) {
if len(name) == 0 {
err = fmt.Errorf("unable to determine pod running status: pod name is missing")
return
}
isReady, err := k.Run([]string{"get", "pods", name, "-o", "jsonpath='{.status.containerStatuses[*].ready}'"})
if err != nil {
return
}
// split the output by space
isReadyArr := strings.Split(isReady, " ")
if contains(isReadyArr, "false") {
err = fmt.Errorf("pod '%s' is not running: '%#v'", name, isReadyArr)
return
}
// double check
if contains(isReadyArr, "true") {
yes = true
} else {
err = fmt.Errorf("status of pod '%s' could not be determined: received output '%#v'", name, isReadyArr)
}
return
}
// GetPodNodes fetches the nodes that hosts the pods. Pods are referred to
// via the provided labels
func GetPodNodes(k KubeRunner) (nodes []string, err error) {
n, err := k.Run([]string{"get", "pods", "-o", "jsonpath='{.items[*].spec.nodeName}'"})
if err != nil {
return
}
// split the output by space
nodes = strings.Split(n, " ")
return
}
// GetPods fetches the pods based on the provided labels
func GetPods(k KubeRunner) (pods []string, err error) {
p, err := k.Run([]string{"get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'"})
if err != nil {
return
}
// split the output by space
pods = strings.Split(p, " ")
return
}
// GetRunningPods fetches the pods which are running based on the provided labels
//
// Sample code to do this:
//
// $ JSONPATH='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}' && kubectl get po -n kube-system -o jsonpath="$JSONPATH"
// kube-addon-manager-amit-thinkpad-l470::true::::kube-dns-54cccfbdf8-q7v2c::false false true::::kubernetes-dashboard-77d8b98585-cwbjq::false::::storage-provisioner::true::::tiller-deploy-5b48764ff7-g9qz7::true::::
func GetRunningPods(k KubeRunner) (pods []string, err error) {
// fetch pods
o, err := k.Run([]string{"get", "pods", "-o", "jsonpath='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}'"})
if err != nil {
return
}
// split the output by the splitter used in above command
firstSplit := strings.Split(o, "::::")
for _, fs := range firstSplit {
if len(fs) == 0 {
continue
}
secondSplit := strings.Split(fs, "::")
// ignore if pod is not running
if strings.Contains(secondSplit[1], "false") {
continue
}
// add the running pod to the list
if strings.Contains(secondSplit[1], "true") {
pods = append(pods, secondSplit[0])
}
}
return
}
// GetOldestRunningPod fetches the oldest running pod based on the provided labels
// and sorted based on their age
//
// Sample code to do this:
//
// $ JSONPATH='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}' && kubectl get po -n kube-system --sort-by=.metadata.creationTimestamp -o jsonpath="$JSONPATH"
// kube-addon-manager-amit-thinkpad-l470::true::::kube-dns-54cccfbdf8-q7v2c::false false true::::kubernetes-dashboard-77d8b98585-cwbjq::false::::storage-provisioner::true::::tiller-deploy-5b48764ff7-g9qz7::true::::
func GetOldestRunningPod(k KubeRunner) (pod string, err error) {
// fetch pods sorted by creation timestamp
o, err := k.Run([]string{"get", "pods", "-o", "--sort-by=.metadata.creationTimestamp", "jsonpath='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}'"})
if err != nil {
return
}
// split the output by the splitter used in above command
firstSplit := strings.Split(o, "::::")
for _, fs := range firstSplit {
if len(fs) == 0 {
continue
}
secondSplit := strings.Split(fs, "::")
// ignore if pod is not running
if strings.Contains(secondSplit[1], "false") {
continue
}
// return the first running pod
if strings.Contains(secondSplit[1], "true") {
pod = secondSplit[0]
return
}
}
return
}
// DeletePod deletes the specified pod
func DeletePod(k KubeRunner, name string) (err error) {
_, err = k.Run([]string{"delete", "pods", name})
return
}
// contains verifies if a specific element is present in the provided array
func contains(s []string, e string) bool {
for _, a := range s {
a = strings.TrimSpace(a)
if a == e {
return true
}
}
return false
}
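
A usage sketch tying the fluent builder to the helpers above (the namespace and label selector are illustrative):

package main

import (
	"fmt"

	"github.com/openebs/litmus/pkg/kubectl"
)

func main() {
	// select pods in the litmus namespace carrying a hypothetical app label
	k := kubectl.New().Namespace("litmus").Labels("app=percona")
	running, err := kubectl.ArePodsRunning(k)
	if err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("all selected pods running:", running)
}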

View File

@ -1,82 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: kubectl.go
// Package kubectl is a generated GoMock package.
package kubectl
import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockKubeRunner is a mock of KubeRunner interface
type MockKubeRunner struct {
ctrl *gomock.Controller
recorder *MockKubeRunnerMockRecorder
}
// MockKubeRunnerMockRecorder is the mock recorder for MockKubeRunner
type MockKubeRunnerMockRecorder struct {
mock *MockKubeRunner
}
// NewMockKubeRunner creates a new mock instance
func NewMockKubeRunner(ctrl *gomock.Controller) *MockKubeRunner {
mock := &MockKubeRunner{ctrl: ctrl}
mock.recorder = &MockKubeRunnerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockKubeRunner) EXPECT() *MockKubeRunnerMockRecorder {
return m.recorder
}
// Run mocks base method
func (m *MockKubeRunner) Run(args []string) (string, error) {
ret := m.ctrl.Call(m, "Run", args)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Run indicates an expected call of Run
func (mr *MockKubeRunnerMockRecorder) Run(args interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockKubeRunner)(nil).Run), args)
}
// MockKubeCtlRunner is a mock of KubeCtlRunner interface
type MockKubeCtlRunner struct {
ctrl *gomock.Controller
recorder *MockKubeCtlRunnerMockRecorder
}
// MockKubeCtlRunnerMockRecorder is the mock recorder for MockKubeCtlRunner
type MockKubeCtlRunnerMockRecorder struct {
mock *MockKubeCtlRunner
}
// NewMockKubeCtlRunner creates a new mock instance
func NewMockKubeCtlRunner(ctrl *gomock.Controller) *MockKubeCtlRunner {
mock := &MockKubeCtlRunner{ctrl: ctrl}
mock.recorder = &MockKubeCtlRunnerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockKubeCtlRunner) EXPECT() *MockKubeCtlRunnerMockRecorder {
return m.recorder
}
// Run mocks base method
func (m *MockKubeCtlRunner) Run(args []string) (string, error) {
ret := m.ctrl.Call(m, "Run", args)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Run indicates an expected call of Run
func (mr *MockKubeCtlRunnerMockRecorder) Run(args interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockKubeCtlRunner)(nil).Run), args)
}

View File

@ -1,578 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
"os"
"reflect"
"testing"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/openebs/litmus/pkg/exec"
)
func TestGetKubectlPath(t *testing.T) {
const kubectlPathENVK = "LITMUS_IO_KUBECTL_PATH"
tests := map[string]struct {
fn func()
expected string
}{
"test 101": {fn: func() {}, expected: KubectlPath},
"test 102": {fn: func() { os.Setenv(kubectlPathENVK, "someVal") }, expected: "someVal"},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
mock.fn()
path := GetKubectlPath()
if path != mock.expected {
t.Fatalf("test '%s' failed: expected value: %s, actual value: %s", name, mock.expected, path)
}
if path != KubectlPath {
os.Unsetenv(kubectlPathENVK)
}
})
}
}
func TestKubectl_Namespace(t *testing.T) {
const someNS = "someNS"
tests := map[string]struct {
namespace string
expected string
}{
"test 101": {expected: "litmus"},
"test 102": {namespace: someNS, expected: someNS},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
kubeCtl := New()
if mock.expected != kubeCtl.Namespace(mock.namespace).namespace {
t.Fatalf("test '%s' failed: expected value: %s, actual value %s", name, mock.expected, kubeCtl.namespace)
}
}
})
}
}
func TestKubectl_Labels(t *testing.T) {
labels := "someLabel"
kubeCtl := New()
if labels != kubeCtl.Labels(labels).labels {
t.Fatalf("test '%s' failed: expected value: %s, actual value %s", "101", labels, kubeCtl.labels)
}
}
func TestKubectl_Context(t *testing.T) {
ctx := "someCtx"
kubeCtl := New()
if ctx != kubeCtl.Context(ctx).context {
t.Fatalf("test '%s' failed: expected value: %s, actual value %s", "101", ctx, kubeCtl.context)
}
}
func TestKubectl_Args(t *testing.T) {
args := []string{"something"}
kubeCtl := New()
if !reflect.DeepEqual(args, kubeCtl.Args(args).args) {
t.Fatalf("test '%s' failed: expected value: %s, actual value %s", "101", args, kubeCtl.args)
}
}
func TestKubectl_Run(t *testing.T) {
const (
namespace = "--namespace=litmus"
context = "someCtx"
labels = "someLabels"
)
type expected struct {
output string
err error
}
tests := map[string]struct {
args []string
expected *expected
}{
"test 101": {
args: []string{context, labels, namespace},
expected: &expected{
output: "--namespace=litmus--context=someCtx--selector=someLabels",
},
},
"test 201": {
args: []string{context, labels},
expected: &expected{
output: "--namespace=litmus--context=someCtx--selector=someLabels",
err: errors.New("some output error"),
},
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockExecutor := exec.NewMockExecutor(ctrl)
mockExecutor.EXPECT().Output(mock.args).Return(mock.expected.output, mock.expected.err)
ctl := New()
ctl.executor = mockExecutor
output, err := ctl.Run(mock.args[:len(mock.args)-1])
if err != mock.expected.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.err, err)
}
if output != mock.expected.output {
t.Fatalf("test '%s' failed: expected value: %s, actual value %s", name, mock.expected.output, output)
}
})
}
}
func TestIsPod(t *testing.T) {
tests := map[string]struct {
pod string
expected bool
}{
"test 101": {pod: "po", expected: true},
"test 102": {pod: "pod", expected: true},
"test 103": {pod: "pods", expected: true},
"test 104": {pod: "deploy", expected: true},
"test 105": {pod: "deployment", expected: true},
"test 106": {pod: "deployments", expected: true},
"test 107": {pod: "job", expected: true},
"test 108": {pod: "jobs", expected: true},
"test 109": {pod: "sts", expected: true},
"test 110": {pod: "statefulset", expected: true},
"test 111": {pod: "statefulsets", expected: true},
"test 112": {pod: "ds", expected: true},
"test 113": {pod: "daemonset", expected: true},
"test 114": {pod: "daemonsets", expected: true},
"test 201": {},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
isPod := IsPod(mock.pod)
if mock.expected != isPod {
t.Fatalf("test '%s' failed: expected value: %t, actual value: %t", name, mock.expected, isPod)
}
})
}
}
func TestArePodsRunning(t *testing.T) {
args := []string{"get", "pods", "-o", "jsonpath='{.items[*].status.containerStatuses[*].ready}'"}
type expected struct {
runningBool bool
runningString string
runErr error
err error
}
tests := map[string]struct {
expected *expected
}{
"test 101": {
expected: &expected{
runningString: "true",
runningBool: true,
},
},
"test 201": {
expected: &expected{
runErr: errors.New("some running err"),
runningString: "false",
},
},
"test 202": {
expected: &expected{
err: fmt.Errorf("pod(s) are not running: '%#v'", []string{"true", "false"}),
runningString: "true false",
},
},
"test 203": {
expected: &expected{err: fmt.Errorf("status of pod(s) could not be determined: '%#v'", []string{""})},
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.expected.runningString, mock.expected.runErr).AnyTimes()
running, err := ArePodsRunning(mockRunner)
if mock.expected.runErr != nil && err != mock.expected.runErr {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.runErr, err)
} else if mock.expected.err != nil && err.Error() != mock.expected.err.Error() {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.err, err)
}
if running != mock.expected.runningBool {
t.Fatalf("test '%s' failed: expected value: %t, actual value: %t", name, mock.expected.runningBool, running)
}
})
}
}
func TestIsPodRunning(t *testing.T) {
const someName = "someName"
args := []string{"get", "pods", "", "-o", "jsonpath='{.status.containerStatuses[*].ready}'"}
type expected struct {
runningBool bool
runningString string
runErr error
err error
}
tests := map[string]struct {
name string
expected *expected
}{
"test 101": {
name: someName,
expected: &expected{
runningBool: true,
runningString: "true",
},
},
"test 201": {
name: "",
expected: &expected{
runningString: "false",
err: errors.New("unable to determine pod running status: pod name is missing"),
},
},
"test 202": {
name: someName,
expected: &expected{
runningString: "false",
runErr: errors.New("some running err"),
},
},
"test 203": {
name: someName,
expected: &expected{
runningString: "false",
err: fmt.Errorf("pod '%s' is not running: '%#v'", someName, []string{"false"}),
},
},
"test 204": {
name: someName,
expected: &expected{err: fmt.Errorf("status of pod '%s' could not be determined: received output '%#v'", someName, []string{""})},
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
args[2] = mock.name
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.expected.runningString, mock.expected.runErr).AnyTimes()
isRunning, err := IsPodRunning(mockRunner, mock.name)
if mock.expected.runErr != nil && err != mock.expected.runErr {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.runErr, err)
} else if mock.expected.err != nil && err.Error() != mock.expected.err.Error() {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.err, err)
}
if isRunning != mock.expected.runningBool {
t.Fatalf("test '%s' failed: expected value %t, actual value %t", name, mock.expected.runningBool, isRunning)
}
})
}
}
func TestGetPodNodes(t *testing.T) {
args := []string{"get", "pods", "-o", "jsonpath='{.items[*].spec.nodeName}'"}
type expected struct {
err error
nodes []string
}
tests := map[string]struct {
input string
expected *expected
}{
"test 101": {
input: "some stuff",
expected: &expected{nodes: []string{"some", "stuff"}},
},
"test 201": {
expected: &expected{err: errors.New("some running err")},
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.input, mock.expected.err).AnyTimes()
nodes, err := GetPodNodes(mockRunner)
if err != mock.expected.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.err, err)
}
if !reflect.DeepEqual(nodes, mock.expected.nodes) {
t.Fatalf("test '%s' failed: expected value %v, actual value %v", name, mock.expected.nodes, nodes)
}
})
}
}
func TestGetPods(t *testing.T) {
args := []string{"get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'"}
type expected struct {
err error
nodes []string
}
tests := map[string]struct {
input string
expected *expected
}{
"test 101": {
input: "some stuff",
expected: &expected{nodes: []string{"some", "stuff"}},
},
"test 201": {
expected: &expected{err: errors.New("some running err")},
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.input, mock.expected.err).AnyTimes()
nodes, err := GetPods(mockRunner)
if err != mock.expected.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.expected.err, err)
}
if !reflect.DeepEqual(nodes, mock.expected.nodes) {
t.Fatalf("test '%s' failed: expected value %v, actual value %v", name, mock.expected.nodes, nodes)
}
})
}
}
func TestGetRunningPods(t *testing.T) {
args := []string{"get", "pods", "-o", "jsonpath='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}'"}
tests := map[string]struct {
err error
output []string
input string
}{
"test 101": {},
"test 102": {input: "true::false::::true::false"},
"test 103": {input: "false::true::::true::true", output: []string{"false", "true"}},
"test 201": {err: errors.New("some running err")},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.input, mock.err).AnyTimes()
output, err := GetRunningPods(mockRunner)
if err != mock.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.err, err)
}
if !reflect.DeepEqual(output, mock.output) {
t.Fatalf("test '%s' failed: expected value %v, actual value %v", name, mock.output, output)
}
})
}
}
func TestGetOldestRunningPod(t *testing.T) {
args := []string{"get", "pods", "-o", "--sort-by=.metadata.creationTimestamp", "jsonpath='{range .items[*]}{@.metadata.name}::{@.status.containerStatuses[*].ready}::::{end}'"}
tests := map[string]struct {
err error
output string
input string
}{
"test 101": {},
"test 102": {input: "true::false::::true::false"},
"test 103": {input: "true::true::::true::false", output: "true"},
"test 201": {err: errors.New("some running err")},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return(mock.input, mock.err).AnyTimes()
output, err := GetOldestRunningPod(mockRunner)
if err != mock.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.err, err)
}
if output != mock.output {
t.Fatalf("test '%s' failed: expected value %v, actual value %v", name, mock.output, output)
}
})
}
}
func TestDeletePod(t *testing.T) {
args := []string{"delete", "pods", "someName"}
const somename = "someName"
tests := map[string]struct {
err error
}{
"test 101": {},
"test 102": {err: errors.New("some runnign err")},
}
for name, mock := range tests {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockRunner := NewMockKubeRunner(ctrl)
mockRunner.EXPECT().Run(args).Return("", mock.err).AnyTimes()
if err := DeletePod(mockRunner, somename); err != mock.err {
t.Fatalf("test '%s' failed: expected error: %s, actual error: %s", name, mock.err, err)
}
}
}

View File

@ -1,43 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package time
import (
"time"
)
// WaitFor pauses the current goroutine for the provided time.
// This function is modelled using time.ParseDuration.
//
// time.ParseDuration parses a duration string.
// A duration string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms", "-1.5h" or "2h45m".
//
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
func WaitFor(duration string) (err error) {
// sleep interval
interval, err := time.ParseDuration(duration)
if err != nil {
return
}
sleep(interval)
return
}
var sleep = time.Sleep
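
A short sketch of how this helper is meant to be called; the import alias avoids clashing with the standard library's time package (the import path follows the repository layout used elsewhere in this commit):

package main

import (
	"fmt"

	litmustime "github.com/openebs/litmus/pkg/time"
)

func main() {
	// pause for two seconds using a ParseDuration-style string
	if err := litmustime.WaitFor("2s"); err != nil {
		fmt.Println("invalid duration:", err)
	}
}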

View File

@ -1,72 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package time
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestWaitFor(t *testing.T) {
tests := []struct {
name string
argDuration string
wantSleep time.Duration
wantErr bool
}{
{"nanosecond", "1ns", time.Nanosecond, false},
{"microsecond", "1us", time.Microsecond, false},
{"millisecond", "1ms", time.Millisecond, false},
{"second", "1s", time.Second, false},
{"minute", "1m", time.Minute, false},
{"hour", "1h", time.Hour, false},
{"negative", "-1s", -time.Second, false},
{"fraction", "1.5s", 1500 * time.Millisecond, false},
{"combo", "1h45m", 105 * time.Minute, false},
{"error", "1a", 0, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var gotSleep time.Duration
defer swapSleep(func(d time.Duration) { gotSleep = d })()
err := WaitFor(tt.argDuration)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.wantSleep, gotSleep)
}
})
}
t.Run("real sleep is called", func(t *testing.T) {
start := time.Now()
err := WaitFor("100ms")
require.NoError(t, err)
require.Condition(t, func() bool { return time.Since(start) > 100*time.Millisecond })
})
}
func swapSleep(newSleep func(time.Duration)) func() {
oldSleep := sleep
sleep = newSleep
return func() { sleep = oldSleep }
}

View File

@ -1,73 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"os"
"strings"
)
// ENVKey is a string type that names the environment
// variables used by litmus
type ENVKey string
const (
// KubeNamespaceENVK is the ENV key to fetch the
// namespace where LITMUS operations will be executed
KubeNamespaceENVK ENVKey = "LITMUS_IO_KUBE_NAMESPACE"
// KubeContextENVK is the ENV key to fetch the
// kubernetes context where LITMUS operations will be executed
KubeContextENVK ENVKey = "LITMUS_IO_KUBE_CONTEXT"
// KubeConfigENVK is the ENV key to fetch the kubeconfig file path
// This kubeconfig will be used to connect to target kubernetes cluster
KubeConfigENVK ENVKey = "LITMUS_IO_KUBE_CONFIG"
// KubectlPathENVK is the ENV key to fetch kubectl executable location
KubectlPathENVK ENVKey = "LITMUS_IO_KUBECTL_PATH"
)
// KubectlPathENV gets the kubectl executable location from ENV
func KubectlPathENV() string {
val := getEnv(KubectlPathENVK)
return val
}
// KubeNamespaceENV gets the kubernetes namespace from ENV
func KubeNamespaceENV() string {
val := getEnv(KubeNamespaceENVK)
return val
}
// KubeContextENV gets the kubernetes context from ENV
func KubeContextENV() string {
val := getEnv(KubeContextENVK)
return val
}
// KubeConfigENV gets the kubeconfig file path from ENV
func KubeConfigENV() string {
val := getEnv(KubeConfigENVK)
return val
}
// getEnv fetches the environment variable value from the machine's
// environment
func getEnv(envKey ENVKey) string {
return strings.TrimSpace(os.Getenv(string(envKey)))
}
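
A minimal usage sketch, assuming the package is importable as github.com/openebs/litmus/pkg/util; the values set below are hypothetical:

``` go
package main

import (
	"fmt"
	"os"

	"github.com/openebs/litmus/pkg/util" // assumed import path
)

func main() {
	// hypothetical values, normally injected by the test environment
	os.Setenv("LITMUS_IO_KUBE_NAMESPACE", "litmus")
	os.Setenv("LITMUS_IO_KUBECTL_PATH", "/usr/local/bin/kubectl")

	fmt.Println(util.KubeNamespaceENV()) // "litmus"
	fmt.Println(util.KubectlPathENV())   // "/usr/local/bin/kubectl"
	fmt.Println(util.KubeContextENV())   // "" when unset
}
```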

View File

@ -1,672 +0,0 @@
/*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verify
import (
"fmt"
"io/ioutil"
"strings"
"github.com/ghodss/yaml"
"github.com/openebs/litmus/pkg/kubectl"
)
// VerifyFile type defines a yaml file path that represents an installation
// and is used for various verification purposes
//
// A verify file is a yaml version of Installation struct
type VerifyFile string
// Condition type defines a condition that can be applied against a component
// or a set of components
type Condition string
const (
// UniqueNodeCond is a condition to check uniqueness of node
UniqueNodeCond Condition = "is-unique-node"
// ThreeReplicasCond is a condition to check if replica count is 3
ThreeReplicasCond Condition = "is-three-replicas"
// PVCBoundCond is a condition to check if PVC is bound
PVCBoundCond Condition = "is-pvc-bound"
// PVCUnBoundCond is a condition to check if PVC is unbound
PVCUnBoundCond Condition = "is-pvc-unbound"
)
// Action type defines an action that can be applied against a component
// or a set of components
type Action string
const (
// DeleteAnyPodAction is an action to delete any pod
DeleteAnyPodAction Action = "delete-any-pod"
// DeleteOldestPodAction is an action to delete the oldest pod
DeleteOldestPodAction Action = "delete-oldest-pod"
)
// DeleteVerifier provides contract(s) i.e. method signature(s) to evaluate
// if an installation was deleted successfully
type DeleteVerifier interface {
IsDeleted() (yes bool, err error)
}
// DeployVerifier provides contract(s) i.e. method signature(s) to evaluate
// if an installation was deployed successfully
type DeployVerifier interface {
IsDeployed() (yes bool, err error)
}
// ConnectVerifier provides contract(s) i.e. method signature(s) to evaluate
// if a connection is possible or not
type ConnectVerifier interface {
IsConnected() (yes bool, err error)
}
// RunVerifier provides contract(s) i.e. method signature(s) to evaluate
// if an entity is in a running state or not
type RunVerifier interface {
IsRunning() (yes bool, err error)
}
// ConditionVerifier provides contract(s) i.e. method signature(s) to evaluate
// if specific entities pass the condition
type ConditionVerifier interface {
IsCondition(alias string, condition Condition) (yes bool, err error)
}
// ActionVerifier provides contract(s) i.e. method signature(s) to evaluate
// if specific entities pass the action
type ActionVerifier interface {
IsAction(alias string, action Action) (yes bool, err error)
}
// DeployRunVerifier provides contract(s) i.e. method signature(s) to
// evaluate:
//
// 1/ if an entity is deployed &,
// 2/ if the entity is running
type DeployRunVerifier interface {
// DeployVerifier will check if the instance has been deployed or not
DeployVerifier
// RunVerifier will check if the instance is in a running state or not
RunVerifier
}
// AllVerifier provides contract(s) i.e. method signature(s) to
// evaluate:
//
// - if an entity is deleted,
// - if an entity is deployed,
// - if the entity is running,
// - if the entity satisfies the provided condition &
// - if the entity satisfies the provided action
type AllVerifier interface {
// DeleteVerifier will check if the instance has been deleted or not
DeleteVerifier
// DeployVerifier will check if the instance has been deployed or not
DeployVerifier
// RunVerifier will check if the instance is in a running state or not
RunVerifier
// ConditionVerifier will check if the instance satisfies the provided
// condition
ConditionVerifier
// ActionVerifier will check if the instance satisfies the provided action
ActionVerifier
}
// Installation represents a set of components that represent an installation
// e.g. an operator represented by its CRDs, RBACs and Deployments forms an
// installation
//
// NOTE:
// Installation struct is accepted as a yaml file that is meant to be verified.
// In addition this file allows the testing logic to take appropriate actions
// as directed in the .feature file.
type Installation struct {
// VerifyID is an identifier that is used to tie together related installations
// meant to be verified
VerifyID string `json:"verifyID"`
// Version of this installation, operator etc
Version string `json:"version"`
// Components of this installation
Components []Component `json:"components"`
}
// Component is the information about a particular component
// e.g. a Kubernetes Deployment, or a Kubernetes Pod, etc can be
// a component in the overall installation
type Component struct {
// Name of the component
Name string `json:"name"`
// Namespace of the component
Namespace string `json:"namespace"`
// Kind name of the component
// e.g. pods, deployments, services, etc
Kind string `json:"kind"`
// APIVersion of the component
APIVersion string `json:"apiVersion"`
// Labels of the component that is used for filtering the components
//
// Following are some valid sample values for labels:
//
// labels: name=app
// labels: name=app,env=prod
Labels string `json:"labels"`
// Alias provides a user understood description used for filtering the
// components. This is a single word setting.
//
// NOTE:
// Ensure unique alias values in an installation
//
// DETAILS:
// This is the text which is typically understood by the end user. This text
// which will be set in the installation file against a particular component.
// Verification logic will filter the component based on this alias & run
// various checks &/or actions
Alias string `json:"alias"`
}
// unmarshal takes the raw yaml data and unmarshals it into Installation
func unmarshal(data []byte) (installation *Installation, err error) {
installation = &Installation{}
err = yaml.Unmarshal(data, installation)
return
}
// load converts a verify file into an instance of *Installation
func load(file VerifyFile) (installation *Installation, err error) {
if len(file) == 0 {
err = fmt.Errorf("failed to load: verify file is not provided")
return
}
d, err := ioutil.ReadFile(string(file))
if err != nil {
return
}
return unmarshal(d)
}
// KubeInstallVerify provides methods that handles verification related logic of
// an installation within kubernetes e.g. application, deployment, operator, etc
type KubeInstallVerify struct {
// installation is the set of components that determine the install
installation *Installation
}
// NewKubeInstallVerify provides a new instance of KubeInstallVerify based on
// the provided verify file
func NewKubeInstallVerify(file VerifyFile) (*KubeInstallVerify, error) {
i, err := load(file)
if err != nil {
return nil, err
}
return &KubeInstallVerify{
installation: i,
}, nil
}
// IsDeployed evaluates if all components of the installation are deployed
func (v *KubeInstallVerify) IsDeployed() (yes bool, err error) {
if v.installation == nil {
err = fmt.Errorf("failed to check IsDeployed: installation object is nil")
return
}
for _, component := range v.installation.Components {
yes, err = v.isComponentDeployed(component)
if err != nil {
break
}
}
return
}
// IsDeleted evaluates if all components of the installation are deleted
func (v *KubeInstallVerify) IsDeleted() (yes bool, err error) {
if v.installation == nil {
err = fmt.Errorf("failed to check IsDeleted: installation object is nil")
return
}
for _, component := range v.installation.Components {
yes, err = v.isComponentDeleted(component)
if err != nil {
break
}
}
return
}
// IsRunning evaluates if all components of the installation are running
func (v *KubeInstallVerify) IsRunning() (yes bool, err error) {
if v.installation == nil {
err = fmt.Errorf("failed to check IsRunning: installation object is nil")
return
}
for _, component := range v.installation.Components {
if component.Kind != "pod" {
continue
}
yes, err = v.isPodComponentRunning(component)
if err != nil {
break
}
}
return
}
// IsCondition evaluates if specific components satisfy the condition
func (v *KubeInstallVerify) IsCondition(alias string, condition Condition) (yes bool, err error) {
switch condition {
case UniqueNodeCond:
return v.isEachComponentOnUniqueNode(alias)
case ThreeReplicasCond:
return v.hasComponentThreeReplicas(alias)
case PVCBoundCond:
return v.isPVCBound(alias)
case PVCUnBoundCond:
return v.isPVCUnBound(alias)
default:
err = fmt.Errorf("condition '%s' is not supported", condition)
}
return
}
// IsAction evaluates if specific components satisfy the action
func (v *KubeInstallVerify) IsAction(alias string, action Action) (yes bool, err error) {
switch action {
case DeleteAnyPodAction:
return v.isDeleteAnyRunningPod(alias)
case DeleteOldestPodAction:
return v.isDeleteOldestRunningPod(alias)
default:
err = fmt.Errorf("action '%s' is not supported", action)
}
return
}
// isDeleteAnyRunningPod deletes any running pod based on the alias
func (v *KubeInstallVerify) isDeleteAnyRunningPod(alias string) (yes bool, err error) {
var pods = []string{}
c, err := v.getMatchingPodComponent(alias)
if err != nil {
return
}
if len(strings.TrimSpace(c.Labels)) == 0 {
err = fmt.Errorf("unable to fetch component '%s': component labels are missing '%s'", c.Kind, alias)
return
}
k := kubectl.New().Namespace(c.Namespace).Labels(c.Labels)
pods, err = kubectl.GetRunningPods(k)
if err != nil {
return
}
if len(pods) == 0 {
err = fmt.Errorf("failed to delete any running pod: pods with alias '%s' and running state are not found", alias)
return
}
// delete any running pod
k = kubectl.New().Namespace(c.Namespace)
err = kubectl.DeletePod(k, pods[0])
if err != nil {
return
}
yes = true
return
}
// isDeleteOldestRunningPod deletes the oldest running pod based on the alias
func (v *KubeInstallVerify) isDeleteOldestRunningPod(alias string) (yes bool, err error) {
var pod string
c, err := v.getMatchingPodComponent(alias)
if err != nil {
return
}
// check for presence of labels
if len(strings.TrimSpace(c.Labels)) == 0 {
err = fmt.Errorf("unable to fetch component '%s': component labels are missing '%s'", c.Kind, alias)
return
}
// fetch oldest running pod
k := kubectl.New().Namespace(c.Namespace).Labels(c.Labels)
pod, err = kubectl.GetOldestRunningPod(k)
if err != nil {
return
}
if len(pod) == 0 {
err = fmt.Errorf("failed to delete oldest running pod: pod with alias '%s' and running state is not found", alias)
return
}
// delete oldest running pod
k = kubectl.New().Namespace(c.Namespace)
err = kubectl.DeletePod(k, pod)
if err != nil {
return
}
yes = true
return
}
func (v *KubeInstallVerify) getMatchingPodComponent(alias string) (comp Component, err error) {
var filtered = []Component{}
// filter the components that are pods & match with the provided alias
for _, c := range v.installation.Components {
if c.Alias == alias && kubectl.IsPod(c.Kind) {
filtered = append(filtered, c)
}
}
if len(filtered) == 0 {
err = fmt.Errorf("component not found for alias '%s'", alias)
return
}
// there should be only one component that matches the alias
if len(filtered) > 1 {
err = fmt.Errorf("multiple components found for alias '%s': alias should be unique in an install", alias)
return
}
return filtered[0], nil
}
// isComponentDeleted flags if a particular component is deleted
func (v *KubeInstallVerify) isComponentDeleted(component Component) (yes bool, err error) {
var op string
if len(strings.TrimSpace(component.Kind)) == 0 {
err = fmt.Errorf("unable to verify component delete status: component kind is missing")
return
}
// either name or labels is required
if len(strings.TrimSpace(component.Name)) == 0 && len(strings.TrimSpace(component.Labels)) == 0 {
err = fmt.Errorf("unable to verify component delete status: either component name or its labels is required")
return
}
// check via name
if len(strings.TrimSpace(component.Name)) != 0 {
op, err = kubectl.New().
Namespace(component.Namespace).
Run([]string{"get", component.Kind, component.Name})
if err == nil {
err = fmt.Errorf("component '%#v' is not deleted: output '%s'", component, op)
return
}
if strings.Contains(err.Error(), "(NotFound)") {
// yes, it is deleted
yes = true
// We wanted to make sure that this component was deleted.
// Hence the get operation is expected to result in NotFound error
// from server. Now we can reset the err to nil to let the flow
// continue
err = nil
return
}
err = fmt.Errorf("unable to verify delete status of component '%#v': output '%s'", component, op)
return
}
// or check via labels
op, err = kubectl.New().
Namespace(component.Namespace).
Labels(component.Labels).
Run([]string{"get", component.Kind})
if err != nil {
return
}
if len(strings.TrimSpace(op)) == 0 || strings.Contains(op, "No resources found") {
// yes, it is deleted
yes = true
return
}
err = fmt.Errorf("unable to verify delete status of component '%#v': output '%s'", component, op)
return
}
// isComponentDeployed flags if a particular component is deployed
func (v *KubeInstallVerify) isComponentDeployed(component Component) (yes bool, err error) {
var op string
if len(strings.TrimSpace(component.Kind)) == 0 {
err = fmt.Errorf("unable to verify component deploy status: component kind is missing")
return
}
// either name or labels is required
if len(strings.TrimSpace(component.Name)) == 0 && len(strings.TrimSpace(component.Labels)) == 0 {
err = fmt.Errorf("unable to verify component deploy status: either component name or its labels is required")
return
}
// check via name
if len(strings.TrimSpace(component.Name)) != 0 {
op, err = kubectl.New().
Namespace(component.Namespace).
Run([]string{"get", component.Kind, component.Name, "-o", "jsonpath='{.metadata.name}'"})
if err == nil && len(strings.TrimSpace(op)) != 0 {
// yes, it is deployed
yes = true
}
return
}
// or check via labels
op, err = kubectl.New().
Namespace(component.Namespace).
Labels(component.Labels).
Run([]string{"get", component.Kind, "-o", "jsonpath='{.items[*].metadata.name}'"})
if err == nil && len(strings.TrimSpace(op)) != 0 {
// yes, it is deployed
yes = true
}
return
}
// isPodComponentRunning flags if a particular component is running
func (v *KubeInstallVerify) isPodComponentRunning(component Component) (yes bool, err error) {
// either name or labels is required
if len(strings.TrimSpace(component.Name)) == 0 && len(strings.TrimSpace(component.Labels)) == 0 {
err = fmt.Errorf("unable to verify pod component running status: either name or its labels is required")
return
}
// check via name
if len(strings.TrimSpace(component.Name)) != 0 {
k := kubectl.New().Namespace(component.Namespace)
return kubectl.IsPodRunning(k, component.Name)
}
// or check via labels
k := kubectl.New().
Namespace(component.Namespace).
Labels(component.Labels)
return kubectl.ArePodsRunning(k)
}
// hasComponentThreeReplicas flags if a component has three replicas
func (v *KubeInstallVerify) hasComponentThreeReplicas(alias string) (yes bool, err error) {
err = fmt.Errorf("hasComponentThreeReplicas is not implemented")
return
}
// getPVCVolume fetches the PVC's volume
func (v *KubeInstallVerify) getPVCVolume(alias string) (op string, err error) {
var filtered = []Component{}
// filter the components based on the provided alias
for _, c := range v.installation.Components {
if c.Alias == alias {
filtered = append(filtered, c)
}
}
if len(filtered) == 0 {
err = fmt.Errorf("unable to check pvc bound status: no component with alias '%s'", alias)
return
}
if len(filtered) > 1 {
err = fmt.Errorf("unable to check pvc bound status: more than one components found with alias '%s'", alias)
return
}
if len(filtered[0].Name) == 0 {
err = fmt.Errorf("unable to check pvc bound status: component name is required: '%#v'", filtered[0])
return
}
if filtered[0].Kind != "pvc" {
err = fmt.Errorf("unable to check pvc bound status: component is not a pvc resource: '%#v'", filtered[0])
return
}
op, err = kubectl.New().
Namespace(filtered[0].Namespace).
Run([]string{"get", "pvc", filtered[0].Name, "-o", "jsonpath='{.spec.volumeName}'"})
return
}
// isPVCBound flags if a PVC component is bound
func (v *KubeInstallVerify) isPVCBound(alias string) (yes bool, err error) {
var vol string
vol, err = v.getPVCVolume(alias)
if err != nil {
return
}
// if no vol then pvc is not bound
if len(strings.TrimSpace(vol)) == 0 {
err = fmt.Errorf("pvc component is not bound")
return
}
yes = true
return
}
// isPVCUnBound flags if a PVC component is unbound
func (v *KubeInstallVerify) isPVCUnBound(alias string) (yes bool, err error) {
var vol string
vol, err = v.getPVCVolume(alias)
if err != nil {
return
}
// if a volume is present then the pvc is bound
if len(strings.TrimSpace(vol)) != 0 {
err = fmt.Errorf("pvc component is bound")
return
}
yes = true
return
}
// isEachComponentOnUniqueNode flags if each component is placed on unique node
func (v *KubeInstallVerify) isEachComponentOnUniqueNode(alias string) (bool, error) {
var filtered = []Component{}
var nodes = []string{}
// filter the components based on the provided alias
for _, c := range v.installation.Components {
if c.Alias == alias {
filtered = append(filtered, c)
}
}
// get the node of each filtered component
for _, f := range filtered {
// skip for non pod components
if !kubectl.IsPod(f.Kind) {
continue
}
// if pod then get the node on which it is running
if len(strings.TrimSpace(f.Labels)) == 0 {
return false, fmt.Errorf("unable to fetch component '%s' node: component labels are required", f.Kind)
}
k := kubectl.New().Namespace(f.Namespace).Labels(f.Labels)
n, err := kubectl.GetPodNodes(k)
if err != nil {
return false, err
}
nodes = append(nodes, n...)
}
// check if condition is satisfied i.e. no duplicate nodes
exists := map[string]string{}
for _, n := range nodes {
if _, ok := exists[n]; ok {
return false, nil
}
exists[n] = "tracked"
}
return true, nil
}
// KubeConnectionVerify provides methods that verifies connection to a kubernetes
// environment
type KubeConnectionVerify struct{}
// NewKubeConnectionVerify provides a new instance of KubeConnectionVerify
func NewKubeConnectionVerify() *KubeConnectionVerify {
return &KubeConnectionVerify{}
}
// IsConnected verifies if kubectl can connect to the target Kubernetes cluster
func (k *KubeConnectionVerify) IsConnected() (yes bool, err error) {
_, err = kubectl.New().Run([]string{"get", "pods"})
if err == nil {
yes = true
}
return
}
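
A minimal sketch of the verify API above, assuming the package is importable as github.com/openebs/litmus/pkg/verify; the verify file name openebs-operator.yaml (a hypothetical yaml rendering of the Installation struct) and the alias volume-claim are invented for illustration:

``` go
package main

import (
	"fmt"
	"log"

	"github.com/openebs/litmus/pkg/verify" // assumed import path
)

func main() {
	// a hypothetical verify file: verifyID, version and a list of components
	v, err := verify.NewKubeInstallVerify(verify.VerifyFile("openebs-operator.yaml"))
	if err != nil {
		log.Fatal(err)
	}

	// check cluster connectivity first, then the install itself
	if ok, err := verify.NewKubeConnectionVerify().IsConnected(); err != nil || !ok {
		log.Fatalf("cluster not reachable: %v", err)
	}
	deployed, err := v.IsDeployed()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deployed:", deployed)

	// conditions and actions are dispatched on a component's alias
	bound, err := v.IsCondition("volume-claim", verify.PVCBoundCond)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pvc bound:", bound)
}
```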

View File

@ -1,22 +0,0 @@
FROM golang:latest
MAINTAINER AmitD <amit.das@openebs.io>
# Install kubectl
ENV KUBE_LATEST_VERSION="v1.12.0"
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
&& chmod +x /usr/local/bin/kubectl \
&& kubectl version --client
# Install go tools
RUN go get github.com/DATA-DOG/godog/cmd/godog
RUN go get -u github.com/golang/dep/cmd/dep
# Add source code
RUN mkdir -p /go/src/github.com/openebs/litmus
ADD . /go/src/github.com/openebs/litmus/
WORKDIR /go/src/github.com/openebs/litmus/
# Go dep
RUN dep ensure

View File

@ -1,2 +0,0 @@
/cmd/godog/godog
/example/example

View File

@ -1,23 +0,0 @@
language: go
go:
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
go_import_path: github.com/DATA-DOG/godog
install: go install github.com/DATA-DOG/godog/cmd/godog
script:
- go vet github.com/DATA-DOG/godog
- go vet github.com/DATA-DOG/godog/gherkin
- go vet github.com/DATA-DOG/godog/colors
- test -z "$(go fmt ./...)" # fail if not formatted properly
- godog -f progress
- go test -v -race -coverprofile=coverage.txt -covermode=atomic
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,85 +0,0 @@
# Change LOG
**2018-03-04**
- support go1.10 new compiler and linker changes for **godog** command.
**2017-08-31**
- added **BeforeFeature** and **AfterFeature** hooks.
- failed multistep error is now prepended with the parent step text in order
to determine the failed nested step.
- pretty format now removes the step definition location package name in
the comment next to a step if the step definition matches the tested package. If
the step definition is imported from another package, the full package name will
be printed.
**2017-05-04**
- added **--strict** option in order to fail suite when there are pending
or undefined steps. By default, suite passes and treats pending or
undefined steps as TODOs.
**2017-04-29** - **v0.7.0**
- added support for nested steps. From now on, it is possible to return
**godog.Steps** instead of an **error** in the step definition func.
This change introduced few minor changes in **Formatter** interface. Be
sure to adapt the changes if you have custom formatters.
**2017-04-27**
- added an option to randomize scenario execution order, so we could
ensure that scenarios do not depend on global state.
- godog was manually sorting feature files by name. Now it just runs them
in given order, you may sort them anyway you like. For example `godog
$(find . -name '*.feature' | sort)`
**2016-10-30** - **v0.6.0**
- added experimental **events** format, which might be used for unified
cucumber formats. It should not be adopted widely, since it is highly
possible that the specification will change.
- added **RunWithOptions** method which allows you to easily run godog from
**TestMain** without needing to simulate flag arguments. These options
now allow configuring the output writer.
- added flag **-o, --output=runner.binary** which only compiles the test
runner executable, but does not execute it.
- **FlagSet** initialization now takes an io.Writer as the output for help
text. It was not showing nice colors on windows before. The
**--no-colors** option only applies to test run output.
**2016-06-14** - **v0.5.0**
- godog now uses **go tool compile** and **go tool link** to support
vendor directory dependencies. It also compiles the test executable the same
way as the standard **go test** utility. With this change, only go
versions from **1.5** onward are supported.
**2016-06-01**
- parse flags in the main command, to show version and help without needing
to compile the test package and buildable go sources.
**2016-05-28**
- show nicely formatted called step func name and file path
**2016-05-26**
- pack the gherkin dependency in a subpackage to prevent compatibility
conflicts in the future. If you recently upgraded, you will probably need to
reference gherkin as `github.com/DATA-DOG/godog/gherkin` instead.
**2016-05-25**
- refactored test suite build tooling in order to use the standard **go test**
tool, which allows compiling the package with the godog runner script in an
idiomatic **go** way. It also supports all build environment options as usual.
- **godog.Run** now returns an **int** exit status. It was not returning
anything before, so there are no compatibility breaks.
**2016-03-04**
- added **junit** compatible output formatter, which prints **xml**
results to **os.Stdout**
- fixed #14, which skipped printing background steps when there was a
scenario outline in the feature.
**2015-07-03**
- changed **godog.Suite** from an interface to a struct. Context registration should be updated accordingly. The reason
for the change: since it exports the same methods and there is no need to mock a function in tests, there is no
obvious reason to keep an interface.
- in order to support running the suite concurrently, the entry point of the application needed to be refactored. The **Run** method
is now a func of the godog package which initializes and runs the suite (or more suites). The **New** method is removed. This
change made godog a little cleaner.
- renamed the **RegisterFormatter** func to **Format** to be more consistent.

View File

@ -1,28 +0,0 @@
The three clause BSD license (http://en.wikipedia.org/wiki/BSD_licenses)
Copyright (c) 2015-2018, DATA-DOG team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name DataDog.lt may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,32 +0,0 @@
.PHONY: test gherkin bump cover
VERS := $(shell grep 'const Version' -m 1 godog.go | awk -F\" '{print $$2}')
test:
@echo "running all tests"
@go install ./...
@go fmt ./...
@golint github.com/DATA-DOG/godog
@golint github.com/DATA-DOG/godog/cmd/godog
go vet ./...
go test -race
godog -f progress -c 4
gherkin:
@if [ -z "$(VERS)" ]; then echo "Provide gherkin version like: 'VERS=commit-hash'"; exit 1; fi
@rm -rf gherkin
@mkdir gherkin
@curl -s -L https://github.com/cucumber/gherkin-go/tarball/$(VERS) | tar -C gherkin -zx --strip-components 1
@rm -rf gherkin/{.travis.yml,.gitignore,*_test.go,gherkin-generate*,*.razor,*.jq,Makefile,CONTRIBUTING.md}
bump:
@if [ -z "$(VERSION)" ]; then echo "Provide version like: 'VERSION=$(VERS) make bump'"; exit 1; fi
@echo "bumping version from: $(VERS) to $(VERSION)"
@sed -i.bak 's/$(VERS)/$(VERSION)/g' godog.go
@sed -i.bak 's/$(VERS)/$(VERSION)/g' examples/api/version.feature
@find . -name '*.bak' | xargs rm
cover:
go test -race -coverprofile=coverage.txt
go tool cover -html=coverage.txt
rm coverage.txt

View File

@ -1,341 +0,0 @@
[![Build Status](https://travis-ci.org/DATA-DOG/godog.svg?branch=master)](https://travis-ci.org/DATA-DOG/godog)
[![GoDoc](https://godoc.org/github.com/DATA-DOG/godog?status.svg)](https://godoc.org/github.com/DATA-DOG/godog)
[![codecov.io](https://codecov.io/github/DATA-DOG/godog/branch/master/graph/badge.svg)](https://codecov.io/github/DATA-DOG/godog)
# Godog
<p align="center"><img src="/logo.png" alt="Godog logo" style="width:250px;" /></p>
**The API is likely to change a few times before we reach 1.0.0**
Please read the whole README, you may find it very useful. And do not forget
to peek into the
[CHANGELOG](https://github.com/DATA-DOG/godog/blob/master/CHANGELOG.md)
from time to time.
Package godog is the official Cucumber BDD framework for Golang; it merges
specification and test documentation into one cohesive whole. The author
is a core member of the [cucumber team](https://github.com/cucumber).
The project is inspired by [behat][behat] and [cucumber][cucumber] and is
based on cucumber [gherkin3 parser][gherkin].
**Godog** does not intervene with the standard **go test** command
behavior. You can leverage both frameworks to functionally test your
application while maintaining all test related source code in **_test.go**
files.
**Godog** acts similarly to the **go test** command, using the go
compiler and linker tool to produce a test executable. Godog
contexts need to be exported the same way as **Test** functions for go
tests. Note that if you use the **godog** command tool, it will use the `go`
executable to determine the compiler and linker.
**Godog** ships the gherkin parser dependency as a subpackage. This will
ensure that it is always compatible with the installed version of godog.
So in general there are no vendor dependencies needed for installation.
The following about section was taken from
[cucumber](https://cucumber.io/) homepage.
## About
#### A single source of truth
Cucumber merges specification and test documentation into one cohesive whole.
#### Living documentation
Because they're automatically tested by Cucumber, your specifications are
always bang up-to-date.
#### Focus on the customer
Business and IT don't always understand each other. Cucumber's executable
specifications encourage closer collaboration, helping teams keep the
business goal in mind at all times.
#### Less rework
When automated testing is this much fun, teams can easily protect
themselves from costly regressions.
## Install
go get github.com/DATA-DOG/godog/cmd/godog
## Example
The following example can be [found
here](/examples/godogs).
### Step 1
Given we create a new go package **$GOPATH/src/godogs**. From now on, this
is our work directory `cd $GOPATH/src/godogs`.
Imagine we have a **godog cart** to serve godogs for lunch. First of all,
we describe our feature in plain text - `vim
$GOPATH/src/godogs/features/godogs.feature`:
``` gherkin
# file: $GOPATH/src/godogs/features/godogs.feature
Feature: eat godogs
In order to be happy
As a hungry gopher
I need to be able to eat godogs
Scenario: Eat 5 out of 12
Given there are 12 godogs
When I eat 5
Then there should be 7 remaining
```
**NOTE:** same as **go test**, godog respects package-level isolation. All
your step definitions should be in your tested package's root directory. In
this case - `$GOPATH/src/godogs`
### Step 2
If godog is installed in your GOPATH, we can run `godog` inside the
**$GOPATH/src/godogs** directory. You should see that the steps are
undefined:
![Undefined step snippets](/screenshots/undefined.png?raw=true)
If we wish to vendor the godog dependency, we can do it as usual, using the tools
you prefer:
git clone https://github.com/DATA-DOG/godog.git $GOPATH/src/godogs/vendor/github.com/DATA-DOG/godog
It gives you undefined step snippets to implement in your test context.
You may copy these snippets into your `godogs_test.go` file.
Our directory structure should now look like:
![Directory layout](/screenshots/dir-tree.png?raw=true)
If you copy the snippets into the test file and run godog again, you should
see that the step definitions are now pending:
![Pending step definition](/screenshots/pending.png?raw=true)
You may change **ErrPending** to **nil** and the scenario will
pass successfully.
Since we need a working implementation, we may start by implementing only what is necessary.
### Step 3
We only need a number of **godogs** for now. Let's keep it simple.
``` go
/* file: $GOPATH/src/godogs/godogs.go */
package main
// Godogs available to eat
var Godogs int
func main() { /* usual main func */ }
```
### Step 4
Now let's implement our step definitions, which we can copy from the generated
console output snippets, in order to test our feature requirements:
``` go
/* file: $GOPATH/src/godogs/godogs_test.go */
package main
import (
"fmt"
"github.com/DATA-DOG/godog"
)
func thereAreGodogs(available int) error {
Godogs = available
return nil
}
func iEat(num int) error {
if Godogs < num {
return fmt.Errorf("you cannot eat %d godogs, there are %d available", num, Godogs)
}
Godogs -= num
return nil
}
func thereShouldBeRemaining(remaining int) error {
if Godogs != remaining {
return fmt.Errorf("expected %d godogs to be remaining, but there is %d", remaining, Godogs)
}
return nil
}
func FeatureContext(s *godog.Suite) {
s.Step(`^there are (\d+) godogs$`, thereAreGodogs)
s.Step(`^I eat (\d+)$`, iEat)
s.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
s.BeforeScenario(func(interface{}) {
Godogs = 0 // clean the state before every scenario
})
}
```
Now when you run the `godog` again, you should see:
![Passed suite](/screenshots/passed.png?raw=true)
We have hooked to **BeforeScenario** event in order to reset application
state before each scenario. You may hook into more events, like
**AfterStep** to print all state in case of an error. Or
**BeforeSuite** to prepare a database.
By now, you should have figured out how to use **godog**. Another piece of
advice is to make steps orthogonal, small and simple to read for a user. Whether
the user is a non-technical website user or an API developer who may understand
a little more technical context - the steps should target that user.
When steps are orthogonal and small, you can combine them just like you do
with Unix tools. Look for ways to simplify or remove steps which can be
composed.
### References and Tutorials
- [cucumber-html-reporter](https://github.com/gkushang/cucumber-html-reporter)
may be used in order to generate **html** reports together with
**cucumber** output formatter. See the [following docker
image](https://github.com/myie/cucumber-html-reporter) for usage
details.
- [how to use godog by semaphoreci](https://semaphoreci.com/community/tutorials/how-to-use-godog-for-behavior-driven-development-in-go)
- see [examples](https://github.com/DATA-DOG/godog/tree/master/examples)
- see extension [AssistDog](https://github.com/hellomd/assistdog), which
may have useful **gherkin.DataTable** transformations or comparison
methods for assertions.
### Documentation
See [godoc][godoc] for general API details.
See **.travis.yml** for supported **go** versions.
See `godog -h` for general command options.
See implementation examples:
- [rest API server](/examples/api)
- [rest API with Database](/examples/db)
- [godogs](/examples/godogs)
## FAQ
### Running Godog with go test
You may integrate running **godog** into your **go test** command. You can
run it using the go [TestMain](https://golang.org/pkg/testing/#hdr-Main) func,
available since **go 1.4**. In this case it is not necessary to have the
**godog** command installed. See the following example:
``` go
func TestMain(m *testing.M) {
status := godog.RunWithOptions("godog", func(s *godog.Suite) {
FeatureContext(s)
}, godog.Options{
Format: "progress",
Paths: []string{"features"},
Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
})
if st := m.Run(); st > status {
status = st
}
os.Exit(status)
}
```
You can even go one step further and reuse **go test** flags, like
**verbose** mode in order to switch godog **format**. See the following
example:
``` go
func TestMain(m *testing.M) {
format := "progress"
for _, arg := range os.Args[1:] {
if arg == "-test.v=true" { // go test transforms -v option
format = "pretty"
break
}
}
status := godog.RunWithOptions("godog", func(s *godog.Suite) {
godog.SuiteContext(s)
}, godog.Options{
Format: format,
Paths: []string{"features"},
})
if st := m.Run(); st > status {
status = st
}
os.Exit(status)
}
```
Now when running `go test -v` it will use **pretty** format.
### Configure common options for godog CLI
There are no global options or configuration files. Alias your common or
project based commands: `alias godog-wip="godog --format=progress
--tags=@wip"`
### Testing browser interactions
**godog** does not come with builtin packages to connect to the browser.
You may want to look at [selenium](http://www.seleniumhq.org/) and
probably [phantomjs](http://phantomjs.org/). See also the following
components:
1. [browsersteps](https://github.com/llonchj/browsersteps) - provides
basic context steps to start selenium and navigate browser content.
2. You may wish to have [goquery](https://github.com/PuerkitoBio/goquery)
in order to work with HTML responses like with JQuery.
### Concurrency
In order to support concurrency well, you should reset the state and
isolate each scenario. They should not share any state. It is suggested to
run the suite concurrently in order to make sure there is no state
corruption or race conditions in the application.
It is also useful to randomize the order of scenario execution, which you
can now do with **--random** command option.
**NOTE:** if the suite runs with the concurrency option, it runs every
feature concurrently, not the scenarios across different features. This gives
you the flexibility to isolate state per feature. For example, using the
**BeforeFeature** hook, it is possible to spin up a costly service and shut
it down only in the **AfterFeature** hook, sharing the service between all
scenarios in that feature. It is not advisable though, because you
risk having a state dependency.
## Contributions
Feel free to open a pull request. Note, if you wish to contribute an extension to the public API (exported methods or types) -
please open an issue first to discuss whether these changes can be accepted. All backward incompatible changes are
and will be treated cautiously.
## License
**Godog** is licensed under the [three clause BSD license][license]
**Gherkin** is licensed under the [MIT][gherkin-license] and developed as
a part of the [cucumber project][cucumber]
[godoc]: http://godoc.org/github.com/DATA-DOG/godog "Documentation on godoc"
[golang]: https://golang.org/ "GO programming language"
[behat]: http://docs.behat.org/ "Behavior driven development framework for PHP"
[cucumber]: https://cucumber.io/ "Behavior driven development framework"
[gherkin]: https://github.com/cucumber/gherkin-go "Gherkin3 parser for GO"
[gherkin-license]: https://en.wikipedia.org/wiki/MIT_License "The MIT license"
[license]: http://en.wikipedia.org/wiki/BSD_licenses "The three clause BSD license"

View File

@ -1,31 +0,0 @@
package godog
import "go/ast"
func astContexts(f *ast.File) []string {
var contexts []string
for _, d := range f.Decls {
switch fun := d.(type) {
case *ast.FuncDecl:
for _, param := range fun.Type.Params.List {
switch expr := param.Type.(type) {
case *ast.StarExpr:
switch x := expr.X.(type) {
case *ast.Ident:
if x.Name == "Suite" {
contexts = append(contexts, fun.Name.Name)
}
case *ast.SelectorExpr:
switch t := x.X.(type) {
case *ast.Ident:
if t.Name == "godog" && x.Sel.Name == "Suite" {
contexts = append(contexts, fun.Name.Name)
}
}
}
}
}
}
}
return contexts
}
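
The scan above registers functions that take a *godog.Suite parameter. A short standalone sketch of the selector-based case (the source snippet and file name below are invented for illustration; astContexts itself also matches a bare *Suite ident for in-package use):

``` go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package example

import "github.com/DATA-DOG/godog"

func FeatureContext(s *godog.Suite) {}
func helper(n int)                  {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example_test.go", src, 0)
	if err != nil {
		panic(err)
	}
	// mirror astContexts: collect functions whose parameter type is
	// *godog.Suite (a StarExpr wrapping a SelectorExpr)
	for _, d := range f.Decls {
		fun, ok := d.(*ast.FuncDecl)
		if !ok {
			continue
		}
		for _, param := range fun.Type.Params.List {
			star, ok := param.Type.(*ast.StarExpr)
			if !ok {
				continue
			}
			if sel, ok := star.X.(*ast.SelectorExpr); ok {
				if id, ok := sel.X.(*ast.Ident); ok && id.Name == "godog" && sel.Sel.Name == "Suite" {
					fmt.Println("context:", fun.Name.Name) // prints: context: FeatureContext
				}
			}
		}
	}
}
```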

View File

@ -1,337 +0,0 @@
// +build !go1.10
package godog
import (
"bytes"
"fmt"
"go/build"
"go/parser"
"go/token"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"text/template"
"time"
"unicode"
)
var tooldir = findToolDir()
var compiler = filepath.Join(tooldir, "compile")
var linker = filepath.Join(tooldir, "link")
var gopaths = filepath.SplitList(build.Default.GOPATH)
var goarch = build.Default.GOARCH
var goos = build.Default.GOOS
var godogImportPath = "github.com/DATA-DOG/godog"
var runnerTemplate = template.Must(template.New("testmain").Parse(`package main
import (
"github.com/DATA-DOG/godog"
{{if .Contexts}}_test "{{.ImportPath}}"{{end}}
"os"
)
func main() {
status := godog.Run("{{ .Name }}", func (suite *godog.Suite) {
os.Setenv("GODOG_TESTED_PACKAGE", "{{.ImportPath}}")
{{range .Contexts}}
_test.{{ . }}(suite)
{{end}}
})
os.Exit(status)
}`))
// Build creates a test package like go test command at given target path.
// If there are no go files in tested directory, then
// it simply builds a godog executable to scan features.
//
// If there are go test files, it first builds a test
// package with standard go test command.
//
// Finally it generates godog suite executable which
// registers exported godog contexts from the test files
// of tested package.
//
// Returns an error if the suite executable could not be built
func Build(bin string) error {
abs, err := filepath.Abs(".")
if err != nil {
return err
}
// we allow the package to be nil, for the case when godog is run
// in a directory that contains only feature files
pkg := importPackage(abs)
src, anyContexts, err := buildTestMain(pkg)
if err != nil {
return err
}
workdir := fmt.Sprintf(filepath.Join("%s", "godog-%d"), os.TempDir(), time.Now().UnixNano())
testdir := workdir
// if none of test files exist, or there are no contexts found
// we will skip test package compilation, since it is useless
if anyContexts {
// first of all compile test package dependencies
// that will save us many compilations for dependencies
// go does it better
out, err := exec.Command("go", "test", "-i").CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile package: %s, reason: %v, output: %s", pkg.Name, err, string(out))
}
// let go do the dirty work and compile test
// package with its dependencies. Older go
// versions do not accept an existing file as output,
// so we create a temporary executable which will be
// removed.
temp := fmt.Sprintf(filepath.Join("%s", "temp-%d.test"), os.TempDir(), time.Now().UnixNano())
// builds and compile the tested package.
// generated test executable will be removed
// since we do not need it for godog suite.
// we also print back the temp WORK directory
// go has built. We will reuse it for our suite workdir.
out, err = exec.Command("go", "test", "-c", "-work", "-o", temp).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile tested package: %s, reason: %v, output: %s", pkg.Name, err, string(out))
}
defer os.Remove(temp)
// extract go-build temporary directory as our workdir
workdir = strings.TrimSpace(string(out))
if !strings.HasPrefix(workdir, "WORK=") {
return fmt.Errorf("expected WORK dir path, but got: %s", workdir)
}
workdir = strings.Replace(workdir, "WORK=", "", 1)
testdir = filepath.Join(workdir, pkg.ImportPath, "_test")
} else {
// still need to create temporary workdir
if err = os.MkdirAll(testdir, 0755); err != nil {
return err
}
}
defer os.RemoveAll(workdir)
// replace _testmain.go file with our own
testmain := filepath.Join(testdir, "_testmain.go")
err = ioutil.WriteFile(testmain, src, 0644)
if err != nil {
return err
}
// godog library may not be imported in tested package
// but we need it for our testmain package.
// So we look it up in available source paths
// including vendor directory, supported since 1.5.
try := maybeVendorPaths(abs)
for _, d := range build.Default.SrcDirs() {
try = append(try, filepath.Join(d, godogImportPath))
}
godogPkg, err := locatePackage(try)
if err != nil {
return err
}
// make sure godog package archive is installed, gherkin
// will be installed as dependency of godog
cmd := exec.Command("go", "install", godogPkg.ImportPath)
cmd.Env = os.Environ()
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to install godog package: %s, reason: %v", string(out), err)
}
// collect all possible package dirs, will be
// used for includes and linker
pkgDirs := []string{workdir, testdir}
for _, gopath := range gopaths {
pkgDirs = append(pkgDirs, filepath.Join(gopath, "pkg", goos+"_"+goarch))
}
pkgDirs = uniqStringList(pkgDirs)
// compile godog testmain package archive
// we do not depend on CGO so a lot of checks are not necessary
testMainPkgOut := filepath.Join(testdir, "main.a")
args := []string{
"-o", testMainPkgOut,
// "-trimpath", workdir,
"-p", "main",
"-complete",
}
// if godog library is in vendor directory
// link it with import map
if i := strings.LastIndex(godogPkg.ImportPath, "vendor/"); i != -1 {
args = append(args, "-importmap", godogImportPath+"="+godogPkg.ImportPath)
}
for _, inc := range pkgDirs {
args = append(args, "-I", inc)
}
args = append(args, "-pack", testmain)
cmd = exec.Command(compiler, args...)
cmd.Env = os.Environ()
out, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile testmain package: %v - output: %s", err, string(out))
}
// link test suite executable
args = []string{
"-o", bin,
"-buildmode=exe",
}
for _, link := range pkgDirs {
args = append(args, "-L", link)
}
args = append(args, testMainPkgOut)
cmd = exec.Command(linker, args...)
cmd.Env = os.Environ()
out, err = cmd.CombinedOutput()
if err != nil {
msg := `failed to link test executable:
reason: %s
command: %s`
return fmt.Errorf(msg, string(out), linker+" '"+strings.Join(args, "' '")+"'")
}
return nil
}
func locatePackage(try []string) (*build.Package, error) {
for _, p := range try {
abs, err := filepath.Abs(p)
if err != nil {
continue
}
pkg, err := build.ImportDir(abs, 0)
if err != nil {
continue
}
return pkg, nil
}
return nil, fmt.Errorf("failed to find godog package in any of:\n%s", strings.Join(try, "\n"))
}
func importPackage(dir string) *build.Package {
pkg, _ := build.ImportDir(dir, 0)
// normalize import path for local import packages
// taken from go source code
// see: https://github.com/golang/go/blob/go1.7rc5/src/cmd/go/pkg.go#L279
if pkg != nil && pkg.ImportPath == "." {
pkg.ImportPath = path.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir)))
}
return pkg
}
// from go src
func makeImportValid(r rune) rune {
// Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport.
const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
return '_'
}
return r
}
func uniqStringList(strs []string) (unique []string) {
uniq := make(map[string]void, len(strs))
for _, s := range strs {
if _, ok := uniq[s]; !ok {
uniq[s] = void{}
unique = append(unique, s)
}
}
return
}
// buildTestMain if given package is valid
// it scans test files for contexts
// and produces a testmain source code.
func buildTestMain(pkg *build.Package) ([]byte, bool, error) {
var contexts []string
var importPath string
name := "main"
if nil != pkg {
ctxs, err := processPackageTestFiles(
pkg.TestGoFiles,
pkg.XTestGoFiles,
)
if err != nil {
return nil, false, err
}
contexts = ctxs
importPath = pkg.ImportPath
name = pkg.Name
}
data := struct {
Name string
Contexts []string
ImportPath string
}{name, contexts, importPath}
var buf bytes.Buffer
if err := runnerTemplate.Execute(&buf, data); err != nil {
return nil, len(contexts) > 0, err
}
return buf.Bytes(), len(contexts) > 0, nil
}
// maybeVendorPaths determines possible vendor paths
// which goes levels down from given directory
// until it reaches GOPATH source dir
func maybeVendorPaths(dir string) (paths []string) {
for _, gopath := range gopaths {
gopath = filepath.Join(gopath, "src")
for strings.HasPrefix(dir, gopath) && dir != gopath {
paths = append(paths, filepath.Join(dir, "vendor", godogImportPath))
dir = filepath.Dir(dir)
}
}
return
}
// processPackageTestFiles runs through ast of each test
// file pack and looks for godog suite contexts to register
// on run
func processPackageTestFiles(packs ...[]string) ([]string, error) {
var ctxs []string
fset := token.NewFileSet()
for _, pack := range packs {
for _, testFile := range pack {
node, err := parser.ParseFile(fset, testFile, nil, 0)
if err != nil {
return ctxs, err
}
ctxs = append(ctxs, astContexts(node)...)
}
}
var failed []string
for _, ctx := range ctxs {
runes := []rune(ctx)
if unicode.IsLower(runes[0]) {
expected := append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...)
failed = append(failed, fmt.Sprintf("%s - should be: %s", ctx, string(expected)))
}
}
if len(failed) > 0 {
return ctxs, fmt.Errorf("godog contexts must be exported:\n\t%s", strings.Join(failed, "\n\t"))
}
return ctxs, nil
}
func findToolDir() string {
if out, err := exec.Command("go", "env", "GOTOOLDIR").Output(); err != nil {
return filepath.Clean(strings.TrimSpace(string(out)))
}
return filepath.Clean(build.ToolDir)
}

View File

@ -1,409 +0,0 @@
// +build go1.10
package godog
import (
"bytes"
"fmt"
"go/build"
"go/parser"
"go/token"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"text/template"
"time"
"unicode"
)
var tooldir = findToolDir()
var compiler = filepath.Join(tooldir, "compile")
var linker = filepath.Join(tooldir, "link")
var gopaths = filepath.SplitList(build.Default.GOPATH)
var goarch = build.Default.GOARCH
var goroot = build.Default.GOROOT
var goos = build.Default.GOOS
var godogImportPath = "github.com/DATA-DOG/godog"
var runnerTemplate = template.Must(template.New("testmain").Parse(`package main
import (
"github.com/DATA-DOG/godog"
{{if .Contexts}}_test "{{.ImportPath}}"{{end}}
"os"
)
func main() {
status := godog.Run("{{ .Name }}", func (suite *godog.Suite) {
os.Setenv("GODOG_TESTED_PACKAGE", "{{.ImportPath}}")
{{range .Contexts}}
_test.{{ . }}(suite)
{{end}}
})
os.Exit(status)
}`))
// Build creates a test package like go test command at given target path.
// If there are no go files in tested directory, then
// it simply builds a godog executable to scan features.
//
// If there are go test files, it first builds a test
// package with standard go test command.
//
// Finally it generates godog suite executable which
// registers exported godog contexts from the test files
// of tested package.
//
// Returns an error if the suite executable could not be built
func Build(bin string) error {
abs, err := filepath.Abs(".")
if err != nil {
return err
}
// we allow the package to be nil, for the case when godog is run
// in a directory that contains only feature files
pkg := importPackage(abs)
src, anyContexts, err := buildTestMain(pkg)
if err != nil {
return err
}
workdir := fmt.Sprintf(filepath.Join("%s", "godog-%d"), os.TempDir(), time.Now().UnixNano())
testdir := workdir
// if none of test files exist, or there are no contexts found
// we will skip test package compilation, since it is useless
if anyContexts {
// first of all compile test package dependencies
// that will save us many compilations for dependencies
// go does it better
out, err := exec.Command("go", "test", "-i").CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile package: %s, reason: %v, output: %s", pkg.Name, err, string(out))
}
// builds and compile the tested package.
// generated test executable will be removed
// since we do not need it for godog suite.
// we also print back the temp WORK directory
// go has built. We will reuse it for our suite workdir.
out, err = exec.Command("go", "test", "-c", "-work", "-o", "/dev/null").CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile tested package: %s, reason: %v, output: %s", pkg.Name, err, string(out))
}
// extract go-build temporary directory as our workdir
workdir = strings.TrimSpace(string(out))
if !strings.HasPrefix(workdir, "WORK=") {
return fmt.Errorf("expected WORK dir path, but got: %s", workdir)
}
workdir = strings.Replace(workdir, "WORK=", "", 1)
testdir = filepath.Join(workdir, "b001")
} else {
// still need to create temporary workdir
if err = os.MkdirAll(testdir, 0755); err != nil {
return err
}
}
defer os.RemoveAll(workdir)
// replace _testmain.go file with our own
testmain := filepath.Join(testdir, "_testmain.go")
err = ioutil.WriteFile(testmain, src, 0644)
if err != nil {
return err
}
// godog library may not be imported in tested package
// but we need it for our testmain package.
// So we look it up in available source paths
// including vendor directory, supported since 1.5.
godogPkg, err := locatePackage(godogImportPath)
if err != nil {
return err
}
// make sure godog package archive is installed, gherkin
// will be installed as dependency of godog
cmd := exec.Command("go", "install", "-i", godogPkg.ImportPath)
cmd.Env = os.Environ()
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to install godog package: %s, reason: %v", string(out), err)
}
// compile godog testmain package archive
// we do not depend on CGO so a lot of checks are not necessary
testMainPkgOut := filepath.Join(testdir, "main.a")
args := []string{
"-o", testMainPkgOut,
"-p", "main",
"-complete",
}
cfg := filepath.Join(testdir, "importcfg.link")
args = append(args, "-importcfg", cfg)
if _, err := os.Stat(cfg); err != nil {
// there were no go sources in the directory
// so we need to build all dependency tree ourselves
in, err := os.Create(cfg)
if err != nil {
return err
}
fmt.Fprintln(in, "# import config")
deps := make(map[string]string)
if err := dependencies(godogPkg, deps, false); err != nil {
in.Close()
return err
}
for pkgName, pkgObj := range deps {
if i := strings.LastIndex(pkgName, "vendor/"); i != -1 {
name := pkgName[i+7:]
fmt.Fprintf(in, "importmap %s=%s\n", name, pkgName)
}
fmt.Fprintf(in, "packagefile %s=%s\n", pkgName, pkgObj)
}
in.Close()
} else {
// need to make sure that vendor dependencies are mapped
in, err := os.OpenFile(cfg, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return err
}
deps := make(map[string]string)
if err := dependencies(pkg, deps, true); err != nil {
in.Close()
return err
}
if err := dependencies(godogPkg, deps, false); err != nil {
in.Close()
return err
}
for pkgName := range deps {
if i := strings.LastIndex(pkgName, "vendor/"); i != -1 {
name := pkgName[i+7:]
fmt.Fprintf(in, "importmap %s=%s\n", name, pkgName)
}
}
in.Close()
}
args = append(args, "-pack", testmain)
cmd = exec.Command(compiler, args...)
cmd.Env = os.Environ()
out, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to compile testmain package: %v - output: %s", err, string(out))
}
// link test suite executable
args = []string{
"-o", bin,
"-importcfg", cfg,
"-buildmode=exe",
}
args = append(args, testMainPkgOut)
cmd = exec.Command(linker, args...)
cmd.Env = os.Environ()
// in case the build is without contexts, we need to remove the import maps
data, err := ioutil.ReadFile(cfg)
if err != nil {
return err
}
lines := strings.Split(string(data), "\n")
var fixed []string
for _, line := range lines {
if strings.Index(line, "importmap") == 0 {
continue
}
fixed = append(fixed, line)
}
if err := ioutil.WriteFile(cfg, []byte(strings.Join(fixed, "\n")), 0600); err != nil {
return err
}
out, err = cmd.CombinedOutput()
if err != nil {
msg := `failed to link test executable:
reason: %s
command: %s`
return fmt.Errorf(msg, string(out), linker+" '"+strings.Join(args, "' '")+"'")
}
return nil
}
func locatePackage(name string) (*build.Package, error) {
// search vendor paths first since that takes priority
dir, err := filepath.Abs(".")
if err != nil {
return nil, err
}
for _, gopath := range gopaths {
gopath = filepath.Join(gopath, "src")
for strings.HasPrefix(dir, gopath) && dir != gopath {
pkg, err := build.ImportDir(filepath.Join(dir, "vendor", name), 0)
if err != nil {
dir = filepath.Dir(dir)
continue
}
return pkg, nil
}
}
// search source paths otherwise
for _, p := range build.Default.SrcDirs() {
abs, err := filepath.Abs(filepath.Join(p, name))
if err != nil {
continue
}
pkg, err := build.ImportDir(abs, 0)
if err != nil {
continue
}
return pkg, nil
}
return nil, fmt.Errorf("failed to find %s package in any of:\n%s", name, strings.Join(build.Default.SrcDirs(), "\n"))
}
func importPackage(dir string) *build.Package {
pkg, _ := build.ImportDir(dir, 0)
// normalize import path for local import packages
// taken from go source code
// see: https://github.com/golang/go/blob/go1.7rc5/src/cmd/go/pkg.go#L279
if pkg != nil && pkg.ImportPath == "." {
pkg.ImportPath = path.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir)))
}
return pkg
}
// from go src
func makeImportValid(r rune) rune {
// Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport.
const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
return '_'
}
return r
}
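// Illustrative (hypothetical directory): a local import of "/tmp/my pkg"
// is normalized by importPackage above to the import path "_/tmp/my_pkg",
// since makeImportValid maps the space to '_'.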
func uniqStringList(strs []string) (unique []string) {
uniq := make(map[string]void, len(strs))
for _, s := range strs {
if _, ok := uniq[s]; !ok {
uniq[s] = void{}
unique = append(unique, s)
}
}
return
}
// buildTestMain, if the given package is valid,
// scans its test files for godog suite contexts
// and produces the testmain source code.
func buildTestMain(pkg *build.Package) ([]byte, bool, error) {
var contexts []string
var importPath string
name := "main"
if nil != pkg {
ctxs, err := processPackageTestFiles(
pkg.TestGoFiles,
pkg.XTestGoFiles,
)
if err != nil {
return nil, false, err
}
contexts = ctxs
importPath = pkg.ImportPath
name = pkg.Name
}
data := struct {
Name string
Contexts []string
ImportPath string
}{name, contexts, importPath}
var buf bytes.Buffer
if err := runnerTemplate.Execute(&buf, data); err != nil {
return nil, len(contexts) > 0, err
}
return buf.Bytes(), len(contexts) > 0, nil
}
// processPackageTestFiles walks the AST of each test
// file in the given packs and collects godog suite contexts
// to register on run
func processPackageTestFiles(packs ...[]string) ([]string, error) {
var ctxs []string
fset := token.NewFileSet()
for _, pack := range packs {
for _, testFile := range pack {
node, err := parser.ParseFile(fset, testFile, nil, 0)
if err != nil {
return ctxs, err
}
ctxs = append(ctxs, astContexts(node)...)
}
}
var failed []string
for _, ctx := range ctxs {
runes := []rune(ctx)
if unicode.IsLower(runes[0]) {
expected := append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...)
failed = append(failed, fmt.Sprintf("%s - should be: %s", ctx, string(expected)))
}
}
if len(failed) > 0 {
return ctxs, fmt.Errorf("godog contexts must be exported:\n\t%s", strings.Join(failed, "\n\t"))
}
return ctxs, nil
}
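// A minimal sketch of the kind of exported context function the scan above
// detects in test files (assumes the public godog API; iHaveApples is a
// hypothetical step implementation):
//
//	func FeatureContext(s *godog.Suite) {
//		s.Step(`^I have (\d+) apples$`, iHaveApples)
//	}
//
// A lowercase name such as featureContext would be rejected by the
// exported-name check above.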
func findToolDir() string {
// use the command output only when `go env` succeeded
if out, err := exec.Command("go", "env", "GOTOOLDIR").Output(); err == nil {
return filepath.Clean(strings.TrimSpace(string(out)))
}
return filepath.Clean(build.ToolDir)
}
func dependencies(pkg *build.Package, visited map[string]string, vendor bool) error {
visited[pkg.ImportPath] = pkg.PkgObj
imports := pkg.Imports
if vendor {
imports = append(imports, pkg.TestImports...)
}
for _, name := range imports {
if i := strings.LastIndex(name, "vendor/"); vendor && i == -1 {
continue // only interested in vendor packages
}
if _, ok := visited[name]; ok {
continue
}
next, err := locatePackage(name)
if err != nil {
return err
}
// record the located package's own object file, not the parent's
visited[name] = next.PkgObj
if err := dependencies(next, visited, vendor); err != nil {
return err
}
}
return nil
}


@ -1,18 +0,0 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package colors
import "io"
type ansiColorWriter struct {
w io.Writer
mode outputMode
}
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
return cw.w.Write(p)
}


@ -1,417 +0,0 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build windows
package colors
import (
"bytes"
"io"
"strings"
"syscall"
"unsafe"
)
type csiState int
const (
outsideCsiCode csiState = iota
firstCsiCode
secondCsiCode
)
type parseResult int
const (
noConsole parseResult = iota
changedColor
unknown
)
type ansiColorWriter struct {
w io.Writer
mode outputMode
state csiState
paramStartBuf bytes.Buffer
paramBuf bytes.Buffer
}
const (
firstCsiChar byte = '\x1b'
secondCsiChar byte = '['
separatorChar byte = ';'
sgrCode byte = 'm'
)
const (
foregroundBlue = uint16(0x0001)
foregroundGreen = uint16(0x0002)
foregroundRed = uint16(0x0004)
foregroundIntensity = uint16(0x0008)
backgroundBlue = uint16(0x0010)
backgroundGreen = uint16(0x0020)
backgroundRed = uint16(0x0040)
backgroundIntensity = uint16(0x0080)
underscore = uint16(0x8000)
foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
)
const (
ansiReset = "0"
ansiIntensityOn = "1"
ansiIntensityOff = "21"
ansiUnderlineOn = "4"
ansiUnderlineOff = "24"
ansiBlinkOn = "5"
ansiBlinkOff = "25"
ansiForegroundBlack = "30"
ansiForegroundRed = "31"
ansiForegroundGreen = "32"
ansiForegroundYellow = "33"
ansiForegroundBlue = "34"
ansiForegroundMagenta = "35"
ansiForegroundCyan = "36"
ansiForegroundWhite = "37"
ansiForegroundDefault = "39"
ansiBackgroundBlack = "40"
ansiBackgroundRed = "41"
ansiBackgroundGreen = "42"
ansiBackgroundYellow = "43"
ansiBackgroundBlue = "44"
ansiBackgroundMagenta = "45"
ansiBackgroundCyan = "46"
ansiBackgroundWhite = "47"
ansiBackgroundDefault = "49"
ansiLightForegroundGray = "90"
ansiLightForegroundRed = "91"
ansiLightForegroundGreen = "92"
ansiLightForegroundYellow = "93"
ansiLightForegroundBlue = "94"
ansiLightForegroundMagenta = "95"
ansiLightForegroundCyan = "96"
ansiLightForegroundWhite = "97"
ansiLightBackgroundGray = "100"
ansiLightBackgroundRed = "101"
ansiLightBackgroundGreen = "102"
ansiLightBackgroundYellow = "103"
ansiLightBackgroundBlue = "104"
ansiLightBackgroundMagenta = "105"
ansiLightBackgroundCyan = "106"
ansiLightBackgroundWhite = "107"
)
type drawType int
const (
foreground drawType = iota
background
)
type winColor struct {
code uint16
drawType drawType
}
var colorMap = map[string]winColor{
ansiForegroundBlack: {0, foreground},
ansiForegroundRed: {foregroundRed, foreground},
ansiForegroundGreen: {foregroundGreen, foreground},
ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground},
ansiForegroundBlue: {foregroundBlue, foreground},
ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground},
ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiBackgroundBlack: {0, background},
ansiBackgroundRed: {backgroundRed, background},
ansiBackgroundGreen: {backgroundGreen, background},
ansiBackgroundYellow: {backgroundRed | backgroundGreen, background},
ansiBackgroundBlue: {backgroundBlue, background},
ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background},
ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background},
ansiBackgroundDefault: {0, background},
ansiLightForegroundGray: {foregroundIntensity, foreground},
ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground},
ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground},
ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground},
ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
ansiLightBackgroundGray: {backgroundIntensity, background},
ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background},
ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background},
ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background},
ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background},
ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background},
ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
defaultAttr *textAttributes
)
func init() {
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo != nil {
colorMap[ansiForegroundDefault] = winColor{
screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
foreground,
}
colorMap[ansiBackgroundDefault] = winColor{
screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
background,
}
defaultAttr = convertTextAttr(screenInfo.WAttributes)
}
}
type coord struct {
X, Y int16
}
type smallRect struct {
Left, Top, Right, Bottom int16
}
type consoleScreenBufferInfo struct {
DwSize coord
DwCursorPosition coord
WAttributes uint16
SrWindow smallRect
DwMaximumWindowSize coord
}
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
var csbi consoleScreenBufferInfo
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
hConsoleOutput,
uintptr(unsafe.Pointer(&csbi)))
if ret == 0 {
return nil
}
return &csbi
}
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
ret, _, _ := procSetConsoleTextAttribute.Call(
hConsoleOutput,
uintptr(wAttributes))
return ret != 0
}
type textAttributes struct {
foregroundColor uint16
backgroundColor uint16
foregroundIntensity uint16
backgroundIntensity uint16
underscore uint16
otherAttributes uint16
}
func convertTextAttr(winAttr uint16) *textAttributes {
fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
fgIntensity := winAttr & foregroundIntensity
bgIntensity := winAttr & backgroundIntensity
underline := winAttr & underscore
otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
}
func convertWinAttr(textAttr *textAttributes) uint16 {
var winAttr uint16
winAttr |= textAttr.foregroundColor
winAttr |= textAttr.backgroundColor
winAttr |= textAttr.foregroundIntensity
winAttr |= textAttr.backgroundIntensity
winAttr |= textAttr.underscore
winAttr |= textAttr.otherAttributes
return winAttr
}
func changeColor(param []byte) parseResult {
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
if screenInfo == nil {
return noConsole
}
winAttr := convertTextAttr(screenInfo.WAttributes)
strParam := string(param)
if len(strParam) == 0 {
strParam = "0"
}
csiParam := strings.Split(strParam, string(separatorChar))
for _, p := range csiParam {
c, ok := colorMap[p]
switch {
case !ok:
switch p {
case ansiReset:
winAttr.foregroundColor = defaultAttr.foregroundColor
winAttr.backgroundColor = defaultAttr.backgroundColor
winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
winAttr.underscore = 0
winAttr.otherAttributes = 0
case ansiIntensityOn:
winAttr.foregroundIntensity = foregroundIntensity
case ansiIntensityOff:
winAttr.foregroundIntensity = 0
case ansiUnderlineOn:
winAttr.underscore = underscore
case ansiUnderlineOff:
winAttr.underscore = 0
case ansiBlinkOn:
winAttr.backgroundIntensity = backgroundIntensity
case ansiBlinkOff:
winAttr.backgroundIntensity = 0
default:
// unknown code
}
case c.drawType == foreground:
winAttr.foregroundColor = c.code
case c.drawType == background:
winAttr.backgroundColor = c.code
}
}
winTextAttribute := convertWinAttr(winAttr)
setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
return changedColor
}
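// Illustrative: the ANSI sequence "\x1b[1;31m" reaches changeColor as the
// param "1;31"; "1" switches on foregroundIntensity and "31" maps to
// foregroundRed via colorMap above.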
func parseEscapeSequence(command byte, param []byte) parseResult {
if defaultAttr == nil {
return noConsole
}
switch command {
case sgrCode:
return changeColor(param)
default:
return unknown
}
}
func (cw *ansiColorWriter) flushBuffer() (int, error) {
return cw.flushTo(cw.w)
}
func (cw *ansiColorWriter) resetBuffer() (int, error) {
return cw.flushTo(nil)
}
func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) {
var n1, n2 int
var err error
startBytes := cw.paramStartBuf.Bytes()
cw.paramStartBuf.Reset()
if w != nil {
n1, err = cw.w.Write(startBytes)
if err != nil {
return n1, err
}
} else {
n1 = len(startBytes)
}
paramBytes := cw.paramBuf.Bytes()
cw.paramBuf.Reset()
if w != nil {
n2, err = cw.w.Write(paramBytes)
if err != nil {
return n1 + n2, err
}
} else {
n2 = len(paramBytes)
}
return n1 + n2, nil
}
func isParameterChar(b byte) bool {
return ('0' <= b && b <= '9') || b == separatorChar
}
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
r, nw, first, last := 0, 0, 0, 0
if cw.mode != discardNonColorEscSeq {
cw.state = outsideCsiCode
cw.resetBuffer()
}
var err error
for i, ch := range p {
switch cw.state {
case outsideCsiCode:
if ch == firstCsiChar {
cw.paramStartBuf.WriteByte(ch)
cw.state = firstCsiCode
}
case firstCsiCode:
switch ch {
case firstCsiChar:
cw.paramStartBuf.WriteByte(ch)
case secondCsiChar:
cw.paramStartBuf.WriteByte(ch)
cw.state = secondCsiCode
last = i - 1
default:
cw.resetBuffer()
cw.state = outsideCsiCode
}
case secondCsiCode:
if isParameterChar(ch) {
cw.paramBuf.WriteByte(ch)
} else {
nw, err = cw.w.Write(p[first:last])
r += nw
if err != nil {
return r, err
}
first = i + 1
result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
if result == noConsole || (cw.mode == outputNonColorEscSeq && result == unknown) {
cw.paramBuf.WriteByte(ch)
nw, err := cw.flushBuffer()
if err != nil {
return r, err
}
r += nw
} else {
n, _ := cw.resetBuffer()
// Add one more to the size of the buffer for the last ch
r += n + 1
}
cw.state = outsideCsiCode
}
default:
cw.state = outsideCsiCode
}
}
if cw.mode != discardNonColorEscSeq || cw.state == outsideCsiCode {
nw, err = cw.w.Write(p[first:])
r += nw
}
return r, err
}


@ -1,59 +0,0 @@
package colors
import (
"fmt"
"strings"
)
const ansiEscape = "\x1b"
// a color code type
type color int
// some ansi colors
const (
black color = iota + 30
red
green
yellow
blue
magenta
cyan
white
)
func colorize(s interface{}, c color) string {
return fmt.Sprintf("%s[%dm%v%s[0m", ansiEscape, c, s, ansiEscape)
}
type ColorFunc func(interface{}) string
func Bold(fn ColorFunc) ColorFunc {
return ColorFunc(func(input interface{}) string {
return strings.Replace(fn(input), ansiEscape+"[", ansiEscape+"[1;", 1)
})
}
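// Illustrative: colorize("ok", green) produces "\x1b[32mok\x1b[0m", and
// Bold(Green)("ok") rewrites the opening sequence to "\x1b[1;32mok\x1b[0m".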
func Green(s interface{}) string {
return colorize(s, green)
}
func Red(s interface{}) string {
return colorize(s, red)
}
func Cyan(s interface{}) string {
return colorize(s, cyan)
}
func Black(s interface{}) string {
return colorize(s, black)
}
func Yellow(s interface{}) string {
return colorize(s, yellow)
}
func White(s interface{}) string {
return colorize(s, white)
}


@ -1,57 +0,0 @@
package colors
import (
"bytes"
"fmt"
"io"
)
type noColors struct {
out io.Writer
lastbuf bytes.Buffer
}
func Uncolored(w io.Writer) io.Writer {
return &noColors{out: w}
}
func (w *noColors) Write(data []byte) (n int, err error) {
er := bytes.NewBuffer(data)
loop:
for {
c1, _, err := er.ReadRune()
if err != nil {
break loop
}
if c1 != 0x1b {
fmt.Fprint(w.out, string(c1))
continue
}
c2, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
break loop
}
if c2 != 0x5b {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
continue
}
var buf bytes.Buffer
for {
c, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
w.lastbuf.Write(buf.Bytes())
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
break
}
buf.Write([]byte(string(c)))
}
}
return len(data) - w.lastbuf.Len(), nil
}
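// Illustrative usage: writing "\x1b[31mred\x1b[0m" through Uncolored(os.Stdout)
// emits just "red", since the escape sequences are consumed above.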


@ -1,41 +0,0 @@
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package colors
import "io"
type outputMode int
// discardNonColorEscSeq supports color escape sequences that are divided
// across writes, but it does not output non-color escape sequences.
// Use outputNonColorEscSeq if you want to output non-color escape
// sequences such as those used by ncurses; it, however, does not support
// divided color escape sequences.
const (
_ outputMode = iota
discardNonColorEscSeq
outputNonColorEscSeq
)
// Colored creates and initializes a new ansiColorWriter
// that writes to w.
// On a Windows console it translates ANSI escape sequences into
// foreground and background text color changes; on other systems
// it writes all text to w unchanged.
func Colored(w io.Writer) io.Writer {
return createModeAnsiColorWriter(w, discardNonColorEscSeq)
}
// createModeAnsiColorWriter creates and initializes a new ansiColorWriter
// with the specified outputMode.
func createModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
if _, ok := w.(*ansiColorWriter); !ok {
return &ansiColorWriter{
w: w,
mode: mode,
}
}
return w
}
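// Illustrative usage (assumes this package is imported as colors):
//
//	w := colors.Colored(os.Stdout)
//	fmt.Fprintln(w, "\x1b[32mpassed\x1b[0m")
//
// On Windows the wrapper translates the escape codes; elsewhere it
// passes them through.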


@ -1,192 +0,0 @@
package godog
import (
"flag"
"fmt"
"io"
"math/rand"
"strconv"
"strings"
"time"
"github.com/DATA-DOG/godog/colors"
)
var descFeaturesArgument = "Optional feature(s) to run. Can be:\n" +
s(4) + "- dir " + colors.Yellow("(features/)") + "\n" +
s(4) + "- feature " + colors.Yellow("(*.feature)") + "\n" +
s(4) + "- scenario at specific line " + colors.Yellow("(*.feature:10)") + "\n" +
"If no feature paths are listed, suite tries " + colors.Yellow("features") + " path by default.\n"
var descConcurrencyOption = "Run the test suite with concurrency level:\n" +
s(4) + "- " + colors.Yellow(`= 1`) + ": supports all types of formats.\n" +
s(4) + "- " + colors.Yellow(`>= 2`) + ": only supports " + colors.Yellow("progress") + ". Note, that\n" +
s(4) + "your context needs to support parallel execution."
var descTagsOption = "Filter scenarios by tags. Expression can be:\n" +
s(4) + "- " + colors.Yellow(`"@wip"`) + ": run all scenarios with wip tag\n" +
s(4) + "- " + colors.Yellow(`"~@wip"`) + ": exclude all scenarios with wip tag\n" +
s(4) + "- " + colors.Yellow(`"@wip && ~@new"`) + ": run wip scenarios, but exclude new\n" +
s(4) + "- " + colors.Yellow(`"@wip,@undone"`) + ": run wip or undone scenarios"
var descRandomOption = "Randomly shuffle the scenario execution order.\n" +
"Specify SEED to reproduce the shuffling from a previous run.\n" +
s(4) + `e.g. ` + colors.Yellow(`--random`) + " or " + colors.Yellow(`--random=5738`)
// FlagSet allows an external suite runner to manage the flags
func FlagSet(opt *Options) *flag.FlagSet {
descFormatOption := "How to format tests output. Built-in formats:\n"
// @TODO: sort by name
for name, desc := range AvailableFormatters() {
descFormatOption += s(4) + "- " + colors.Yellow(name) + ": " + desc + "\n"
}
descFormatOption = strings.TrimSpace(descFormatOption)
set := flag.NewFlagSet("godog", flag.ExitOnError)
set.StringVar(&opt.Format, "format", "pretty", descFormatOption)
set.StringVar(&opt.Format, "f", "pretty", descFormatOption)
set.StringVar(&opt.Tags, "tags", "", descTagsOption)
set.StringVar(&opt.Tags, "t", "", descTagsOption)
set.IntVar(&opt.Concurrency, "concurrency", 1, descConcurrencyOption)
set.IntVar(&opt.Concurrency, "c", 1, descConcurrencyOption)
set.BoolVar(&opt.ShowStepDefinitions, "definitions", false, "Print all available step definitions.")
set.BoolVar(&opt.ShowStepDefinitions, "d", false, "Print all available step definitions.")
set.BoolVar(&opt.StopOnFailure, "stop-on-failure", false, "Stop processing on first failed scenario.")
set.BoolVar(&opt.Strict, "strict", false, "Fail suite when there are pending or undefined steps.")
set.BoolVar(&opt.NoColors, "no-colors", false, "Disable ansi colors.")
set.Var(&randomSeed{&opt.Randomize}, "random", descRandomOption)
set.Usage = usage(set, opt.Output)
return set
}
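// A hedged usage sketch (not part of the original source; assumes only the
// Options fields used above):
//
//	opt := godog.Options{Output: os.Stdout}
//	set := godog.FlagSet(&opt)
//	set.Parse(os.Args[1:])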
type flagged struct {
short, long, descr, dflt string
}
func (f *flagged) name() string {
var name string
switch {
case len(f.short) > 0 && len(f.long) > 0:
name = fmt.Sprintf("-%s, --%s", f.short, f.long)
case len(f.long) > 0:
name = fmt.Sprintf("--%s", f.long)
case len(f.short) > 0:
name = fmt.Sprintf("-%s", f.short)
}
if f.long == "random" {
// `random` is special in that we will later assign a random seed
// if the user specifies `--random` without a seed value,
// so mask the "default" value here to avoid UI confusion about
// what the value will end up being.
name += "[=SEED]"
} else if f.dflt != "true" && f.dflt != "false" {
name += "=" + f.dflt
}
return name
}
func usage(set *flag.FlagSet, w io.Writer) func() {
return func() {
var list []*flagged
var longest int
set.VisitAll(func(f *flag.Flag) {
var fl *flagged
for _, flg := range list {
if flg.descr == f.Usage {
fl = flg
break
}
}
if nil == fl {
fl = &flagged{
dflt: f.DefValue,
descr: f.Usage,
}
list = append(list, fl)
}
if len(f.Name) > 2 {
fl.long = f.Name
} else {
fl.short = f.Name
}
})
for _, f := range list {
if len(f.name()) > longest {
longest = len(f.name())
}
}
// prints an option or argument with a description, or only description
opt := func(name, desc string) string {
var ret []string
lines := strings.Split(desc, "\n")
ret = append(ret, s(2)+colors.Green(name)+s(longest+2-len(name))+lines[0])
if len(lines) > 1 {
for _, ln := range lines[1:] {
ret = append(ret, s(2)+s(longest+2)+ln)
}
}
return strings.Join(ret, "\n")
}
// --- GENERAL ---
fmt.Fprintln(w, colors.Yellow("Usage:"))
fmt.Fprintf(w, s(2)+"godog [options] [<features>]\n\n")
// description
fmt.Fprintln(w, "Builds a test package and runs given feature files.")
fmt.Fprintf(w, "Command should be run from the directory of tested package and contain buildable go source.\n\n")
// --- ARGUMENTS ---
fmt.Fprintln(w, colors.Yellow("Arguments:"))
// --> features
fmt.Fprintln(w, opt("features", descFeaturesArgument))
// --- OPTIONS ---
fmt.Fprintln(w, colors.Yellow("Options:"))
for _, f := range list {
fmt.Fprintln(w, opt(f.name(), f.descr))
}
fmt.Fprintln(w, "")
}
}
// randomSeed implements `flag.Value`, see https://golang.org/pkg/flag/#Value
type randomSeed struct {
ref *int64
}
// choose randomly assigns a convenient pseudo-random seed value.
// The resulting seed will be between `1-99998` for later ease of specification.
func (rs *randomSeed) choose() {
r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
*rs.ref = r.Int63n(99998) + 1
}
func (rs *randomSeed) Set(s string) error {
if s == "true" {
rs.choose()
return nil
}
if s == "false" {
*rs.ref = 0
return nil
}
i, err := strconv.ParseInt(s, 10, 64)
*rs.ref = i
return err
}
func (rs randomSeed) String() string {
return strconv.FormatInt(*rs.ref, 10)
}
// If a Value has an IsBoolFlag() bool method returning true, the command-line
// parser makes -name equivalent to -name=true rather than using the next
// command-line argument.
func (rs *randomSeed) IsBoolFlag() bool {
return *rs.ref == 0
}


@ -1,513 +0,0 @@
package godog
import (
"bytes"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"text/template"
"time"
"unicode"
"github.com/DATA-DOG/godog/colors"
"github.com/DATA-DOG/godog/gherkin"
)
// some snippet formatting regexps
var snippetExprCleanup = regexp.MustCompile("([\\/\\[\\]\\(\\)\\\\^\\$\\.\\|\\?\\*\\+\\'])")
var snippetExprQuoted = regexp.MustCompile("(\\W|^)\"(?:[^\"]*)\"(\\W|$)")
var snippetMethodName = regexp.MustCompile("[^a-zA-Z\\_\\ ]")
var snippetNumbers = regexp.MustCompile("(\\d+)")
var snippetHelperFuncs = template.FuncMap{
"backticked": func(s string) string {
return "`" + s + "`"
},
}
var undefinedSnippetsTpl = template.Must(template.New("snippets").Funcs(snippetHelperFuncs).Parse(`
{{ range . }}func {{ .Method }}({{ .Args }}) error {
return godog.ErrPending
}
{{end}}func FeatureContext(s *godog.Suite) { {{ range . }}
s.Step({{ backticked .Expr }}, {{ .Method }}){{end}}
}
`))
type undefinedSnippet struct {
Method string
Expr string
argument interface{} // gherkin step argument
}
type registeredFormatter struct {
name string
fmt FormatterFunc
description string
}
var formatters []*registeredFormatter
func findFmt(format string) FormatterFunc {
for _, el := range formatters {
if el.name == format {
return el.fmt
}
}
return nil
}
// Format registers a feature suite output
// formatter by given name, description and
// FormatterFunc constructor function, to initialize
// formatter with the output recorder.
func Format(name, description string, f FormatterFunc) {
formatters = append(formatters, &registeredFormatter{
name: name,
fmt: f,
description: description,
})
}
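// Illustrative only: a custom formatter could be registered like
//
//	Format("null", "Discards all output.", func(suite string, out io.Writer) Formatter {
//		return &nullFmt{} // nullFmt: a hypothetical Formatter implementation
//	})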
// AvailableFormatters gives a map of all
// formatters registered with their name as key
// and description as value
func AvailableFormatters() map[string]string {
fmts := make(map[string]string, len(formatters))
for _, f := range formatters {
fmts[f.name] = f.description
}
return fmts
}
// Formatter is an interface for feature runner
// output summary presentation.
//
// New formatters may be created to represent
// suite results in different ways. These new
// formatters need to be registered with a
// godog.Format function call
type Formatter interface {
Feature(*gherkin.Feature, string, []byte)
Node(interface{})
Defined(*gherkin.Step, *StepDef)
Failed(*gherkin.Step, *StepDef, error)
Passed(*gherkin.Step, *StepDef)
Skipped(*gherkin.Step, *StepDef)
Undefined(*gherkin.Step, *StepDef)
Pending(*gherkin.Step, *StepDef)
Summary()
}
// FormatterFunc builds a formatter with given
// suite name and io.Writer to record output
type FormatterFunc func(string, io.Writer) Formatter
type stepType int
const (
passed stepType = iota
failed
skipped
undefined
pending
)
func (st stepType) clr() colors.ColorFunc {
switch st {
case passed:
return green
case failed:
return red
case skipped:
return cyan
default:
return yellow
}
}
func (st stepType) String() string {
switch st {
case passed:
return "passed"
case failed:
return "failed"
case skipped:
return "skipped"
case undefined:
return "undefined"
case pending:
return "pending"
default:
return "unknown"
}
}
type stepResult struct {
typ stepType
feature *feature
owner interface{}
step *gherkin.Step
def *StepDef
err error
}
func (f stepResult) line() string {
return fmt.Sprintf("%s:%d", f.feature.Path, f.step.Location.Line)
}
func (f stepResult) scenarioDesc() string {
if sc, ok := f.owner.(*gherkin.Scenario); ok {
return fmt.Sprintf("%s: %s", sc.Keyword, sc.Name)
}
if row, ok := f.owner.(*gherkin.TableRow); ok {
for _, def := range f.feature.Feature.ScenarioDefinitions {
out, ok := def.(*gherkin.ScenarioOutline)
if !ok {
continue
}
for _, ex := range out.Examples {
for _, rw := range ex.TableBody {
if rw.Location.Line == row.Location.Line {
return fmt.Sprintf("%s: %s", out.Keyword, out.Name)
}
}
}
}
}
return f.line() // was not expecting a different owner
}
func (f stepResult) scenarioLine() string {
if sc, ok := f.owner.(*gherkin.Scenario); ok {
return fmt.Sprintf("%s:%d", f.feature.Path, sc.Location.Line)
}
if row, ok := f.owner.(*gherkin.TableRow); ok {
for _, def := range f.feature.Feature.ScenarioDefinitions {
out, ok := def.(*gherkin.ScenarioOutline)
if !ok {
continue
}
for _, ex := range out.Examples {
for _, rw := range ex.TableBody {
if rw.Location.Line == row.Location.Line {
return fmt.Sprintf("%s:%d", f.feature.Path, out.Location.Line)
}
}
}
}
}
return f.line() // was not expecting a different owner
}
type basefmt struct {
out io.Writer
owner interface{}
indent int
started time.Time
features []*feature
failed []*stepResult
passed []*stepResult
skipped []*stepResult
undefined []*stepResult
pending []*stepResult
}
func (f *basefmt) Node(n interface{}) {
switch t := n.(type) {
case *gherkin.TableRow:
f.owner = t
case *gherkin.Scenario:
f.owner = t
}
}
func (f *basefmt) Defined(*gherkin.Step, *StepDef) {
}
func (f *basefmt) Feature(ft *gherkin.Feature, p string, c []byte) {
f.features = append(f.features, &feature{Path: p, Feature: ft})
}
func (f *basefmt) Passed(step *gherkin.Step, match *StepDef) {
s := &stepResult{
owner: f.owner,
feature: f.features[len(f.features)-1],
step: step,
def: match,
typ: passed,
}
f.passed = append(f.passed, s)
}
func (f *basefmt) Skipped(step *gherkin.Step, match *StepDef) {
s := &stepResult{
owner: f.owner,
feature: f.features[len(f.features)-1],
step: step,
def: match,
typ: skipped,
}
f.skipped = append(f.skipped, s)
}
func (f *basefmt) Undefined(step *gherkin.Step, match *StepDef) {
s := &stepResult{
owner: f.owner,
feature: f.features[len(f.features)-1],
step: step,
def: match,
typ: undefined,
}
f.undefined = append(f.undefined, s)
}
func (f *basefmt) Failed(step *gherkin.Step, match *StepDef, err error) {
s := &stepResult{
owner: f.owner,
feature: f.features[len(f.features)-1],
step: step,
def: match,
err: err,
typ: failed,
}
f.failed = append(f.failed, s)
}
func (f *basefmt) Pending(step *gherkin.Step, match *StepDef) {
s := &stepResult{
owner: f.owner,
feature: f.features[len(f.features)-1],
step: step,
def: match,
typ: pending,
}
f.pending = append(f.pending, s)
}
func (f *basefmt) Summary() {
var total, passed, undefined int
for _, ft := range f.features {
for _, def := range ft.ScenarioDefinitions {
switch t := def.(type) {
case *gherkin.Scenario:
total++
case *gherkin.ScenarioOutline:
for _, ex := range t.Examples {
if examples, hasExamples := examples(ex); hasExamples {
total += len(examples.TableBody)
}
}
}
}
}
passed = total
var owner interface{}
for _, undef := range f.undefined {
if owner != undef.owner {
undefined++
owner = undef.owner
}
}
var steps, parts, scenarios []string
nsteps := len(f.passed) + len(f.failed) + len(f.skipped) + len(f.undefined) + len(f.pending)
if len(f.passed) > 0 {
steps = append(steps, green(fmt.Sprintf("%d passed", len(f.passed))))
}
if len(f.failed) > 0 {
passed -= len(f.failed)
parts = append(parts, red(fmt.Sprintf("%d failed", len(f.failed))))
steps = append(steps, parts[len(parts)-1])
}
if len(f.pending) > 0 {
passed -= len(f.pending)
parts = append(parts, yellow(fmt.Sprintf("%d pending", len(f.pending))))
steps = append(steps, yellow(fmt.Sprintf("%d pending", len(f.pending))))
}
if len(f.undefined) > 0 {
passed -= undefined
parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefined)))
steps = append(steps, yellow(fmt.Sprintf("%d undefined", len(f.undefined))))
}
if len(f.skipped) > 0 {
steps = append(steps, cyan(fmt.Sprintf("%d skipped", len(f.skipped))))
}
if passed > 0 {
scenarios = append(scenarios, green(fmt.Sprintf("%d passed", passed)))
}
scenarios = append(scenarios, parts...)
elapsed := timeNowFunc().Sub(f.started)
fmt.Fprintln(f.out, "")
if total == 0 {
fmt.Fprintln(f.out, "No scenarios")
} else {
fmt.Fprintf(f.out, "%d scenarios (%s)\n", total, strings.Join(scenarios, ", "))
}
if nsteps == 0 {
fmt.Fprintln(f.out, "No steps")
} else {
fmt.Fprintf(f.out, "%d steps (%s)\n", nsteps, strings.Join(steps, ", "))
}
fmt.Fprintln(f.out, elapsed)
// prints used randomization seed
seed, err := strconv.ParseInt(os.Getenv("GODOG_SEED"), 10, 64)
if err == nil && seed != 0 {
fmt.Fprintln(f.out, "")
fmt.Fprintln(f.out, "Randomized with seed:", colors.Yellow(seed))
}
if text := f.snippets(); text != "" {
fmt.Fprintln(f.out, yellow("\nYou can implement step definitions for undefined steps with these snippets:"))
fmt.Fprintln(f.out, yellow(text))
}
}
func (s *undefinedSnippet) Args() (ret string) {
var args []string
var pos int
var breakLoop bool
for !breakLoop {
part := s.Expr[pos:]
ipos := strings.Index(part, "(\\d+)")
spos := strings.Index(part, "\"([^\"]*)\"")
switch {
case spos == -1 && ipos == -1:
breakLoop = true
case spos == -1:
pos += ipos + len("(\\d+)")
args = append(args, reflect.Int.String())
case ipos == -1:
pos += spos + len("\"([^\"]*)\"")
args = append(args, reflect.String.String())
case ipos < spos:
pos += ipos + len("(\\d+)")
args = append(args, reflect.Int.String())
case spos < ipos:
pos += spos + len("\"([^\"]*)\"")
args = append(args, reflect.String.String())
}
}
if s.argument != nil {
switch s.argument.(type) {
case *gherkin.DocString:
args = append(args, "*gherkin.DocString")
case *gherkin.DataTable:
args = append(args, "*gherkin.DataTable")
}
}
var last string
for i, arg := range args {
if last == "" || last == arg {
ret += fmt.Sprintf("arg%d, ", i+1)
} else {
ret = strings.TrimRight(ret, ", ") + fmt.Sprintf(" %s, arg%d, ", last, i+1)
}
last = arg
}
return strings.TrimSpace(strings.TrimRight(ret, ", ") + " " + last)
}
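// Illustrative: for the Expr `^I have (\d+) "([^"]*)" items$` the method
// above returns "arg1 int, arg2 string".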
func (f *basefmt) snippets() string {
if len(f.undefined) == 0 {
return ""
}
var index int
var snips []*undefinedSnippet
// build snippets
for _, u := range f.undefined {
steps := []string{u.step.Text}
arg := u.step.Argument
if u.def != nil {
steps = u.def.undefined
arg = nil
}
for _, step := range steps {
expr := snippetExprCleanup.ReplaceAllString(step, "\\$1")
expr = snippetNumbers.ReplaceAllString(expr, "(\\d+)")
expr = snippetExprQuoted.ReplaceAllString(expr, "$1\"([^\"]*)\"$2")
expr = "^" + strings.TrimSpace(expr) + "$"
name := snippetNumbers.ReplaceAllString(step, " ")
name = snippetExprQuoted.ReplaceAllString(name, " ")
name = strings.TrimSpace(snippetMethodName.ReplaceAllString(name, ""))
var words []string
for i, w := range strings.Split(name, " ") {
switch {
case i != 0:
w = strings.Title(w)
case len(w) > 0:
w = string(unicode.ToLower(rune(w[0]))) + w[1:]
}
words = append(words, w)
}
name = strings.Join(words, "")
if len(name) == 0 {
index++
name = fmt.Sprintf("stepDefinition%d", index)
}
var found bool
for _, snip := range snips {
if snip.Expr == expr {
found = true
break
}
}
if !found {
snips = append(snips, &undefinedSnippet{Method: name, Expr: expr, argument: arg})
}
}
}
var buf bytes.Buffer
if err := undefinedSnippetsTpl.Execute(&buf, snips); err != nil {
panic(err)
}
// there may be trailing spaces
return strings.Replace(buf.String(), " \n", "\n", -1)
}
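// Illustrative: an undefined step `I eat 5 "green" apples` yields the
// snippet expression `^I eat (\d+) "([^"]*)" apples$` and the suggested
// method name iEatApples.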
func (f *basefmt) isLastStep(s *gherkin.Step) bool {
ft := f.features[len(f.features)-1]
for _, def := range ft.ScenarioDefinitions {
if outline, ok := def.(*gherkin.ScenarioOutline); ok {
for n, step := range outline.Steps {
if step.Location.Line == s.Location.Line {
return n == len(outline.Steps)-1
}
}
}
if scenario, ok := def.(*gherkin.Scenario); ok {
for n, step := range scenario.Steps {
if step.Location.Line == s.Location.Line {
return n == len(scenario.Steps)-1
}
}
}
}
return false
}


@ -1,332 +0,0 @@
package godog
/*
The specification for the formatting originated from https://www.relishapp.com/cucumber/cucumber/docs/formatters/json-output-formatter.
I found that the documentation was misleading or outdated. To validate the formatting I created a ruby cucumber test harness and ran the
same feature files through godog and the ruby cucumber.
The docstrings in the cucumber.feature represent the cucumber output for those same feature definitions.
I did note that comments in ruby could be at just about any level, in particular Feature, Scenario and Step. In godog I
could only find comments under the Feature data structure.
*/
import (
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"time"
"github.com/DATA-DOG/godog/gherkin"
)
func init() {
Format("cucumber", "Produces cucumber JSON format output.", cucumberFunc)
}
func cucumberFunc(suite string, out io.Writer) Formatter {
formatter := &cukefmt{
basefmt: basefmt{
started: timeNowFunc(),
indent: 2,
out: out,
},
}
return formatter
}
// makeID replaces spaces with dashes; it is used to create the "id" fields of the cucumber output.
func makeID(name string) string {
return strings.Replace(strings.ToLower(name), " ", "-", -1)
}
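// Illustrative: makeID("Eat godog apples") returns "eat-godog-apples".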
// The following structs are used to marshal the JSON output.
type cukeComment struct {
Value string `json:"value"`
Line int `json:"line"`
}
type cukeDocstring struct {
Value string `json:"value"`
ContentType string `json:"content_type"`
Line int `json:"line"`
}
type cukeTag struct {
Name string `json:"name"`
Line int `json:"line"`
}
type cukeResult struct {
Status string `json:"status"`
Error string `json:"error_message,omitempty"`
Duration *int `json:"duration,omitempty"`
}
type cukeMatch struct {
Location string `json:"location"`
}
type cukeStep struct {
Keyword string `json:"keyword"`
Name string `json:"name"`
Line int `json:"line"`
Docstring *cukeDocstring `json:"doc_string,omitempty"`
Match cukeMatch `json:"match"`
Result cukeResult `json:"result"`
}
type cukeElement struct {
ID string `json:"id"`
Keyword string `json:"keyword"`
Name string `json:"name"`
Description string `json:"description"`
Line int `json:"line"`
Type string `json:"type"`
Tags []cukeTag `json:"tags,omitempty"`
Steps []cukeStep `json:"steps,omitempty"`
}
type cukeFeatureJSON struct {
URI string `json:"uri"`
ID string `json:"id"`
Keyword string `json:"keyword"`
Name string `json:"name"`
Description string `json:"description"`
Line int `json:"line"`
Comments []cukeComment `json:"comments,omitempty"`
Tags []cukeTag `json:"tags,omitempty"`
Elements []cukeElement `json:"elements,omitempty"`
}
type cukefmt struct {
basefmt
// currently running feature path, to be part of the id.
// this is sadly not passed by gherkin nodes,
// which restricts this formatter to synchronous, single-threaded
// execution, unless a copy of the formatter is run for each feature
path string
stat stepType // last step status, before skipped
outlineSteps int // number of current outline scenario steps
ID string // current test id.
results []cukeFeatureJSON // structure that represent cuke results
curStep *cukeStep // track the current step
curElement *cukeElement // track the current element
curFeature *cukeFeatureJSON // track the current feature
curOutline cukeElement // Each example shows up as an outline element, but the outline is parsed only once,
// so we need to keep track of the current outline
curRow int // current row of the example table as it is being processed.
curExampleTags []cukeTag // temporary storage for tags associate with the current example table.
startTime time.Time // used to time duration of the step execution
curExampleName string // Examples are parsed once and then iterated over for each result, so we need to keep track
// of the example name in order to build the id fields.
}
func (f *cukefmt) Node(n interface{}) {
f.basefmt.Node(n)
switch t := n.(type) {
// When the example definition is seen we just need to track the id and
// append the name associated with the example as part of the id.
case *gherkin.Examples:
f.curExampleName = makeID(t.Name)
f.curRow = 2 // there can be more than one example set per outline so reset row count.
// cucumber counts the header row as an example when creating the id.
// store any example level tags in a temp location.
f.curExampleTags = make([]cukeTag, len(t.Tags))
for idx, element := range t.Tags {
f.curExampleTags[idx].Line = element.Location.Line
f.curExampleTags[idx].Name = element.Name
}
// The outline node creates a placeholder and the actual element is added as each TableRow is processed.
case *gherkin.ScenarioOutline:
f.curOutline = cukeElement{}
f.curOutline.Name = t.Name
f.curOutline.Line = t.Location.Line
f.curOutline.Description = t.Description
f.curOutline.Keyword = t.Keyword
f.curOutline.ID = f.curFeature.ID + ";" + makeID(t.Name)
f.curOutline.Type = "scenario"
f.curOutline.Tags = make([]cukeTag, len(t.Tags)+len(f.curFeature.Tags))
// apply feature level tags
if len(f.curOutline.Tags) > 0 {
copy(f.curOutline.Tags, f.curFeature.Tags)
// apply outline level tags.
for idx, element := range t.Tags {
f.curOutline.Tags[idx+len(f.curFeature.Tags)].Line = element.Location.Line
f.curOutline.Tags[idx+len(f.curFeature.Tags)].Name = element.Name
}
}
// This scenario adds the element to the output immediately.
case *gherkin.Scenario:
f.curFeature.Elements = append(f.curFeature.Elements, cukeElement{})
f.curElement = &f.curFeature.Elements[len(f.curFeature.Elements)-1]
f.curElement.Name = t.Name
f.curElement.Line = t.Location.Line
f.curElement.Description = t.Description
f.curElement.Keyword = t.Keyword
f.curElement.ID = f.curFeature.ID + ";" + makeID(t.Name)
f.curElement.Type = "scenario"
f.curElement.Tags = make([]cukeTag, len(t.Tags)+len(f.curFeature.Tags))
if len(f.curElement.Tags) > 0 {
// apply feature level tags
copy(f.curElement.Tags, f.curFeature.Tags)
// apply scenario level tags.
for idx, element := range t.Tags {
f.curElement.Tags[idx+len(f.curFeature.Tags)].Line = element.Location.Line
f.curElement.Tags[idx+len(f.curFeature.Tags)].Name = element.Name
}
}
// This is an outline scenario and the element is added to the output as
// the TableRows are encountered.
case *gherkin.TableRow:
tmpElem := f.curOutline
tmpElem.Line = t.Location.Line
tmpElem.ID = tmpElem.ID + ";" + f.curExampleName + ";" + strconv.Itoa(f.curRow)
f.curRow++
f.curFeature.Elements = append(f.curFeature.Elements, tmpElem)
f.curElement = &f.curFeature.Elements[len(f.curFeature.Elements)-1]
// copy in example level tags.
f.curElement.Tags = append(f.curElement.Tags, f.curExampleTags...)
}
}
func (f *cukefmt) Feature(ft *gherkin.Feature, p string, c []byte) {
f.basefmt.Feature(ft, p, c)
f.path = p
f.ID = makeID(ft.Name)
f.results = append(f.results, cukeFeatureJSON{})
f.curFeature = &f.results[len(f.results)-1]
f.curFeature.URI = p
f.curFeature.Name = ft.Name
f.curFeature.Keyword = ft.Keyword
f.curFeature.Line = ft.Location.Line
f.curFeature.Description = ft.Description
f.curFeature.ID = f.ID
f.curFeature.Tags = make([]cukeTag, len(ft.Tags))
for idx, element := range ft.Tags {
f.curFeature.Tags[idx].Line = element.Location.Line
f.curFeature.Tags[idx].Name = element.Name
}
f.curFeature.Comments = make([]cukeComment, len(ft.Comments))
for idx, comment := range ft.Comments {
f.curFeature.Comments[idx].Value = strings.TrimSpace(comment.Text)
f.curFeature.Comments[idx].Line = comment.Location.Line
}
}
func (f *cukefmt) Summary() {
dat, err := json.MarshalIndent(f.results, "", " ")
if err != nil {
panic(err)
}
fmt.Fprintf(f.out, "%s\n", string(dat))
}
func (f *cukefmt) step(res *stepResult) {
// determine if test case has finished
switch t := f.owner.(type) {
case *gherkin.TableRow:
d := int(timeNowFunc().Sub(f.startTime).Nanoseconds())
f.curStep.Result.Duration = &d
f.curStep.Line = t.Location.Line
f.curStep.Result.Status = res.typ.String()
if res.err != nil {
f.curStep.Result.Error = res.err.Error()
}
case *gherkin.Scenario:
d := int(timeNowFunc().Sub(f.startTime).Nanoseconds())
f.curStep.Result.Duration = &d
f.curStep.Result.Status = res.typ.String()
if res.err != nil {
f.curStep.Result.Error = res.err.Error()
}
}
}
func (f *cukefmt) Defined(step *gherkin.Step, def *StepDef) {
f.startTime = timeNowFunc() // start timing the step
f.curElement.Steps = append(f.curElement.Steps, cukeStep{})
f.curStep = &f.curElement.Steps[len(f.curElement.Steps)-1]
f.curStep.Name = step.Text
f.curStep.Line = step.Location.Line
f.curStep.Keyword = step.Keyword
if _, ok := step.Argument.(*gherkin.DocString); ok {
f.curStep.Docstring = &cukeDocstring{}
f.curStep.Docstring.ContentType = strings.TrimSpace(step.Argument.(*gherkin.DocString).ContentType)
f.curStep.Docstring.Line = step.Argument.(*gherkin.DocString).Location.Line
f.curStep.Docstring.Value = step.Argument.(*gherkin.DocString).Content
}
if def != nil {
f.curStep.Match.Location = strings.Split(def.definitionID(), " ")[0]
}
}
func (f *cukefmt) Passed(step *gherkin.Step, match *StepDef) {
f.basefmt.Passed(step, match)
f.stat = passed
f.step(f.passed[len(f.passed)-1])
}
func (f *cukefmt) Skipped(step *gherkin.Step, match *StepDef) {
f.basefmt.Skipped(step, match)
f.step(f.skipped[len(f.skipped)-1])
// no duration reported for skipped.
f.curStep.Result.Duration = nil
}
func (f *cukefmt) Undefined(step *gherkin.Step, match *StepDef) {
f.basefmt.Undefined(step, match)
f.stat = undefined
f.step(f.undefined[len(f.undefined)-1])
// the location for undefined is the feature file location not the step file.
f.curStep.Match.Location = fmt.Sprintf("%s:%d", f.path, step.Location.Line)
f.curStep.Result.Duration = nil
}
func (f *cukefmt) Failed(step *gherkin.Step, match *StepDef, err error) {
f.basefmt.Failed(step, match, err)
f.stat = failed
f.step(f.failed[len(f.failed)-1])
}
func (f *cukefmt) Pending(step *gherkin.Step, match *StepDef) {
f.stat = pending
f.basefmt.Pending(step, match)
f.step(f.pending[len(f.pending)-1])
// the location for pending is the feature file location not the step file.
f.curStep.Match.Location = fmt.Sprintf("%s:%d", f.path, step.Location.Line)
f.curStep.Result.Duration = nil
}


@ -1,269 +0,0 @@
package godog
import (
"encoding/json"
"fmt"
"io"
"github.com/DATA-DOG/godog/gherkin"
)
const nanoSec = 1000000 // nanoseconds per millisecond, used to emit timestamps in ms
const spec = "0.1.0"
func init() {
Format("events", fmt.Sprintf("Produces JSON event stream, based on spec: %s.", spec), eventsFunc)
}
func eventsFunc(suite string, out io.Writer) Formatter {
formatter := &events{
basefmt: basefmt{
started: timeNowFunc(),
indent: 2,
out: out,
},
}
formatter.event(&struct {
Event string `json:"event"`
Version string `json:"version"`
Timestamp int64 `json:"timestamp"`
Suite string `json:"suite"`
}{
"TestRunStarted",
spec,
timeNowFunc().UnixNano() / nanoSec,
suite,
})
return formatter
}
type events struct {
basefmt
// currently running feature path, to be part of the id.
// this is sadly not passed by gherkin nodes,
// which restricts this formatter to synchronous, single-threaded
// execution, unless a copy of the formatter is run for each feature
path string
stat stepType // last step status, before skipped
outlineSteps int // number of current outline scenario steps
}
func (f *events) event(ev interface{}) {
data, err := json.Marshal(ev)
if err != nil {
panic(fmt.Sprintf("failed to marshal stream event: %+v - %v", ev, err))
}
fmt.Fprintln(f.out, string(data))
}
func (f *events) Node(n interface{}) {
f.basefmt.Node(n)
var id string
var undefined bool
switch t := n.(type) {
case *gherkin.Scenario:
id = fmt.Sprintf("%s:%d", f.path, t.Location.Line)
undefined = len(t.Steps) == 0
case *gherkin.TableRow:
id = fmt.Sprintf("%s:%d", f.path, t.Location.Line)
undefined = f.outlineSteps == 0
case *gherkin.ScenarioOutline:
f.outlineSteps = len(t.Steps)
}
if len(id) == 0 {
return
}
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Timestamp int64 `json:"timestamp"`
}{
"TestCaseStarted",
id,
timeNowFunc().UnixNano() / nanoSec,
})
if undefined {
// @TODO: is the status undefined or passed, when there are no steps
// for this scenario?
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Timestamp int64 `json:"timestamp"`
Status string `json:"status"`
}{
"TestCaseFinished",
id,
timeNowFunc().UnixNano() / nanoSec,
"undefined",
})
}
}
func (f *events) Feature(ft *gherkin.Feature, p string, c []byte) {
f.basefmt.Feature(ft, p, c)
f.path = p
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Source string `json:"source"`
}{
"TestSource",
fmt.Sprintf("%s:%d", p, ft.Location.Line),
string(c),
})
}
func (f *events) Summary() {
// @TODO: determine status
status := passed
if len(f.failed) > 0 {
status = failed
} else if len(f.passed) == 0 {
if len(f.undefined) > len(f.pending) {
status = undefined
} else {
status = pending
}
}
snips := f.snippets()
if len(snips) > 0 {
snips = "You can implement step definitions for undefined steps with these snippets:\n" + snips
}
f.event(&struct {
Event string `json:"event"`
Status string `json:"status"`
Timestamp int64 `json:"timestamp"`
Snippets string `json:"snippets"`
Memory string `json:"memory"`
}{
"TestRunFinished",
status.String(),
timeNowFunc().UnixNano() / nanoSec,
snips,
"", // @TODO not sure that could be correctly implemented
})
}
func (f *events) step(res *stepResult) {
var errMsg string
if res.err != nil {
errMsg = res.err.Error()
}
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Timestamp int64 `json:"timestamp"`
Status string `json:"status"`
Summary string `json:"summary,omitempty"`
}{
"TestStepFinished",
fmt.Sprintf("%s:%d", f.path, res.step.Location.Line),
timeNowFunc().UnixNano() / nanoSec,
res.typ.String(),
errMsg,
})
// determine if test case has finished
var finished bool
var line int
switch t := f.owner.(type) {
case *gherkin.TableRow:
line = t.Location.Line
finished = f.isLastStep(res.step)
case *gherkin.Scenario:
line = t.Location.Line
finished = f.isLastStep(res.step)
}
if finished {
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Timestamp int64 `json:"timestamp"`
Status string `json:"status"`
}{
"TestCaseFinished",
fmt.Sprintf("%s:%d", f.path, line),
timeNowFunc().UnixNano() / nanoSec,
f.stat.String(),
})
}
}
func (f *events) Defined(step *gherkin.Step, def *StepDef) {
if def != nil {
m := def.Expr.FindStringSubmatchIndex(step.Text)[2:]
var args [][2]int
for i := 0; i < len(m)/2; i++ {
// submatch indices come in consecutive (start, end) pairs
pair := m[i*2 : i*2+2]
var idxs [2]int
idxs[0] = pair[0]
idxs[1] = pair[1]
args = append(args, idxs)
}
if len(args) == 0 {
args = make([][2]int, 0)
}
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
DefID string `json:"definition_id"`
Args [][2]int `json:"arguments"`
}{
"StepDefinitionFound",
fmt.Sprintf("%s:%d", f.path, step.Location.Line),
def.definitionID(),
args,
})
}
f.event(&struct {
Event string `json:"event"`
Location string `json:"location"`
Timestamp int64 `json:"timestamp"`
}{
"TestStepStarted",
fmt.Sprintf("%s:%d", f.path, step.Location.Line),
timeNowFunc().UnixNano() / nanoSec,
})
}
func (f *events) Passed(step *gherkin.Step, match *StepDef) {
f.basefmt.Passed(step, match)
f.stat = passed
f.step(f.passed[len(f.passed)-1])
}
func (f *events) Skipped(step *gherkin.Step, match *StepDef) {
f.basefmt.Skipped(step, match)
f.step(f.skipped[len(f.skipped)-1])
}
func (f *events) Undefined(step *gherkin.Step, match *StepDef) {
f.basefmt.Undefined(step, match)
f.stat = undefined
f.step(f.undefined[len(f.undefined)-1])
}
func (f *events) Failed(step *gherkin.Step, match *StepDef, err error) {
f.basefmt.Failed(step, match, err)
f.stat = failed
f.step(f.failed[len(f.failed)-1])
}
func (f *events) Pending(step *gherkin.Step, match *StepDef) {
f.stat = pending
f.basefmt.Pending(step, match)
f.step(f.pending[len(f.pending)-1])
}


@ -1,208 +0,0 @@
package godog
import (
"encoding/xml"
"fmt"
"io"
"os"
"time"
"github.com/DATA-DOG/godog/gherkin"
)
func init() {
Format("junit", "Prints junit compatible xml to stdout", junitFunc)
}
func junitFunc(suite string, out io.Writer) Formatter {
return &junitFormatter{
suite: &junitPackageSuite{
Name: suite,
TestSuites: make([]*junitTestSuite, 0),
},
out: out,
started: timeNowFunc(),
}
}
type junitFormatter struct {
suite *junitPackageSuite
out io.Writer
// timing
started time.Time
caseStarted time.Time
featStarted time.Time
outline *gherkin.ScenarioOutline
outlineExample int
}
func (j *junitFormatter) Feature(feature *gherkin.Feature, path string, c []byte) {
testSuite := &junitTestSuite{
TestCases: make([]*junitTestCase, 0),
Name: feature.Name,
}
if len(j.suite.TestSuites) > 0 {
j.current().Time = timeNowFunc().Sub(j.featStarted).String()
}
j.featStarted = timeNowFunc()
j.suite.TestSuites = append(j.suite.TestSuites, testSuite)
}
func (j *junitFormatter) Defined(*gherkin.Step, *StepDef) {
}
func (j *junitFormatter) Node(node interface{}) {
suite := j.current()
tcase := &junitTestCase{}
switch t := node.(type) {
case *gherkin.ScenarioOutline:
j.outline = t
j.outlineExample = 0
return
case *gherkin.Scenario:
tcase.Name = t.Name
suite.Tests++
j.suite.Tests++
case *gherkin.TableRow:
j.outlineExample++
tcase.Name = fmt.Sprintf("%s #%d", j.outline.Name, j.outlineExample)
suite.Tests++
j.suite.Tests++
default:
return
}
j.caseStarted = timeNowFunc()
suite.TestCases = append(suite.TestCases, tcase)
}
func (j *junitFormatter) Failed(step *gherkin.Step, match *StepDef, err error) {
suite := j.current()
suite.Failures++
j.suite.Failures++
tcase := suite.current()
tcase.Time = timeNowFunc().Sub(j.caseStarted).String()
tcase.Status = "failed"
tcase.Failure = &junitFailure{
Message: fmt.Sprintf("%s %s: %s", step.Type, step.Text, err.Error()),
}
}
func (j *junitFormatter) Passed(step *gherkin.Step, match *StepDef) {
suite := j.current()
tcase := suite.current()
tcase.Time = timeNowFunc().Sub(j.caseStarted).String()
tcase.Status = "passed"
}
func (j *junitFormatter) Skipped(step *gherkin.Step, match *StepDef) {
suite := j.current()
tcase := suite.current()
tcase.Time = timeNowFunc().Sub(j.caseStarted).String()
tcase.Error = append(tcase.Error, &junitError{
Type: "skipped",
Message: fmt.Sprintf("%s %s", step.Type, step.Text),
})
}
func (j *junitFormatter) Undefined(step *gherkin.Step, match *StepDef) {
suite := j.current()
tcase := suite.current()
if tcase.Status != "undefined" {
// do not count two undefined steps as another error
suite.Errors++
j.suite.Errors++
}
tcase.Time = timeNowFunc().Sub(j.caseStarted).String()
tcase.Status = "undefined"
tcase.Error = append(tcase.Error, &junitError{
Type: "undefined",
Message: fmt.Sprintf("%s %s", step.Type, step.Text),
})
}
func (j *junitFormatter) Pending(step *gherkin.Step, match *StepDef) {
suite := j.current()
suite.Errors++
j.suite.Errors++
tcase := suite.current()
tcase.Time = timeNowFunc().Sub(j.caseStarted).String()
tcase.Status = "pending"
tcase.Error = append(tcase.Error, &junitError{
Type: "pending",
Message: fmt.Sprintf("%s %s: TODO: write pending definition", step.Type, step.Text),
})
}
func (j *junitFormatter) Summary() {
if j.current() != nil {
j.current().Time = timeNowFunc().Sub(j.featStarted).String()
}
j.suite.Time = timeNowFunc().Sub(j.started).String()
io.WriteString(j.out, xml.Header)
enc := xml.NewEncoder(j.out)
enc.Indent("", s(2))
if err := enc.Encode(j.suite); err != nil {
fmt.Fprintln(os.Stderr, "failed to write junit xml:", err)
}
}
type junitFailure struct {
Message string `xml:"message,attr"`
Type string `xml:"type,attr,omitempty"`
}
type junitError struct {
XMLName xml.Name `xml:"error,omitempty"`
Message string `xml:"message,attr"`
Type string `xml:"type,attr"`
}
type junitTestCase struct {
XMLName xml.Name `xml:"testcase"`
Name string `xml:"name,attr"`
Status string `xml:"status,attr"`
Time string `xml:"time,attr"`
Failure *junitFailure `xml:"failure,omitempty"`
Error []*junitError
}
type junitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
Name string `xml:"name,attr"`
Tests int `xml:"tests,attr"`
Skipped int `xml:"skipped,attr"`
Failures int `xml:"failures,attr"`
Errors int `xml:"errors,attr"`
Time string `xml:"time,attr"`
TestCases []*junitTestCase
}
func (ts *junitTestSuite) current() *junitTestCase {
return ts.TestCases[len(ts.TestCases)-1]
}
type junitPackageSuite struct {
XMLName xml.Name `xml:"testsuites"`
Name string `xml:"name,attr"`
Tests int `xml:"tests,attr"`
Skipped int `xml:"skipped,attr"`
Failures int `xml:"failures,attr"`
Errors int `xml:"errors,attr"`
Time string `xml:"time,attr"`
TestSuites []*junitTestSuite
}
func (j *junitFormatter) current() *junitTestSuite {
return j.suite.TestSuites[len(j.suite.TestSuites)-1]
}


@ -1,456 +0,0 @@
package godog
import (
"fmt"
"io"
"math"
"regexp"
"strings"
"unicode/utf8"
"github.com/DATA-DOG/godog/colors"
"github.com/DATA-DOG/godog/gherkin"
)
func init() {
Format("pretty", "Prints every feature with runtime statuses.", prettyFunc)
}
func prettyFunc(suite string, out io.Writer) Formatter {
return &pretty{
basefmt: basefmt{
started: timeNowFunc(),
indent: 2,
out: out,
},
}
}
var outlinePlaceholderRegexp = regexp.MustCompile("<[^>]+>")
// a built in default pretty formatter
type pretty struct {
basefmt
// currently processed
feature *gherkin.Feature
scenario *gherkin.Scenario
outline *gherkin.ScenarioOutline
// state
bgSteps int
totalBgSteps int
steps int
commentPos int
// whether scenario or scenario outline keyword was printed
scenarioKeyword bool
// outline
outlineSteps []*stepResult
outlineNumExample int
outlineNumExamples int
}
func (f *pretty) Feature(ft *gherkin.Feature, p string, c []byte) {
if len(f.features) != 0 {
// not the first feature, add a newline
fmt.Fprintln(f.out, "")
}
f.features = append(f.features, &feature{Path: p, Feature: ft})
fmt.Fprintln(f.out, whiteb(ft.Keyword+": ")+ft.Name)
if strings.TrimSpace(ft.Description) != "" {
for _, line := range strings.Split(ft.Description, "\n") {
fmt.Fprintln(f.out, s(f.indent)+strings.TrimSpace(line))
}
}
f.feature = ft
f.scenario = nil
f.outline = nil
f.bgSteps = 0
f.totalBgSteps = 0
if ft.Background != nil {
f.bgSteps = len(ft.Background.Steps)
f.totalBgSteps = len(ft.Background.Steps)
}
}
// Node takes a gherkin node for formatting
func (f *pretty) Node(node interface{}) {
f.basefmt.Node(node)
switch t := node.(type) {
case *gherkin.Examples:
f.outlineNumExamples = len(t.TableBody)
f.outlineNumExample++
case *gherkin.Scenario:
f.scenario = t
f.outline = nil
f.steps = len(t.Steps) + f.totalBgSteps
f.scenarioKeyword = false
if isEmptyScenario(t) {
f.printUndefinedScenario(t)
}
case *gherkin.ScenarioOutline:
f.outline = t
f.scenario = nil
f.outlineNumExample = -1
f.scenarioKeyword = false
case *gherkin.TableRow:
f.steps = len(f.outline.Steps) + f.totalBgSteps
f.outlineSteps = []*stepResult{}
}
}
func (f *pretty) printUndefinedScenario(sc *gherkin.Scenario) {
if f.bgSteps > 0 {
f.commentPos = f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background))
fmt.Fprintln(f.out, "\n"+s(f.indent)+whiteb(f.feature.Background.Keyword+": "+f.feature.Background.Name))
for _, step := range f.feature.Background.Steps {
f.bgSteps--
f.printStep(step, nil, colors.Cyan)
}
}
text := s(f.indent) + whiteb(f.scenario.Keyword+": ") + sc.Name
text += s(f.commentPos-f.length(f.scenario)+1) + f.line(sc.Location)
fmt.Fprintln(f.out, "\n"+text)
}
// Summary summarizes the feature formatter output
func (f *pretty) Summary() {
if len(f.failed) > 0 {
fmt.Fprintln(f.out, "\n--- "+red("Failed steps:")+"\n")
for _, fail := range f.failed {
fmt.Fprintln(f.out, s(2)+red(fail.scenarioDesc())+black(" # "+fail.scenarioLine()))
fmt.Fprintln(f.out, s(4)+red(strings.TrimSpace(fail.step.Keyword)+" "+fail.step.Text)+black(" # "+fail.line()))
fmt.Fprintln(f.out, s(6)+red("Error: ")+redb(fmt.Sprintf("%+v", fail.err))+"\n")
}
}
f.basefmt.Summary()
}
func (f *pretty) printOutlineExample(outline *gherkin.ScenarioOutline) {
var msg string
var clr colors.ColorFunc
ex := outline.Examples[f.outlineNumExample]
example, hasExamples := examples(ex)
if !hasExamples {
// do not print empty examples
return
}
firstExample := f.outlineNumExamples == len(example.TableBody)
printSteps := firstExample && f.outlineNumExample == 0
for i, res := range f.outlineSteps {
// determine example row status
switch {
case res.typ == failed:
msg = res.err.Error()
clr = res.typ.clr()
case res.typ == undefined || res.typ == pending:
clr = res.typ.clr()
case res.typ == skipped && clr == nil:
clr = cyan
}
if printSteps && i >= f.totalBgSteps {
// in the first example, we need to print the steps
var text string
ostep := outline.Steps[i-f.totalBgSteps]
if res.def != nil {
if m := outlinePlaceholderRegexp.FindAllStringIndex(ostep.Text, -1); len(m) > 0 {
var pos int
for i := 0; i < len(m); i++ {
pair := m[i]
text += cyan(ostep.Text[pos:pair[0]])
text += cyanb(ostep.Text[pair[0]:pair[1]])
pos = pair[1]
}
text += cyan(ostep.Text[pos:])
} else {
text = cyan(ostep.Text)
}
text += s(f.commentPos-f.length(ostep)+1) + black(fmt.Sprintf("# %s", res.def.definitionID()))
} else {
text = cyan(ostep.Text)
}
// print the step outline
fmt.Fprintln(f.out, s(f.indent*2)+cyan(strings.TrimSpace(ostep.Keyword))+" "+text)
// print step argument
// @TODO: need to make example header cells bold
switch t := ostep.Argument.(type) {
case *gherkin.DataTable:
f.printTable(t, cyan)
case *gherkin.DocString:
var ct string
if len(t.ContentType) > 0 {
ct = " " + cyan(t.ContentType)
}
fmt.Fprintln(f.out, s(f.indent*3)+cyan(t.Delimitter)+ct)
for _, ln := range strings.Split(t.Content, "\n") {
fmt.Fprintln(f.out, s(f.indent*3)+cyan(ln))
}
fmt.Fprintln(f.out, s(f.indent*3)+cyan(t.Delimitter))
}
}
}
if clr == nil {
clr = green
}
cells := make([]string, len(example.TableHeader.Cells))
max := longest(example, clr, cyan)
// an example table header
if firstExample {
fmt.Fprintln(f.out, "")
fmt.Fprintln(f.out, s(f.indent*2)+whiteb(example.Keyword+": ")+example.Name)
for i, cell := range example.TableHeader.Cells {
val := cyan(cell.Value)
ln := utf8.RuneCountInString(val)
cells[i] = val + s(max[i]-ln)
}
fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cells, " | ")+" |")
}
// an example table row
row := example.TableBody[len(example.TableBody)-f.outlineNumExamples]
for i, cell := range row.Cells {
val := clr(cell.Value)
ln := utf8.RuneCountInString(val)
cells[i] = val + s(max[i]-ln)
}
fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cells, " | ")+" |")
// if there is an error
if msg != "" {
fmt.Fprintln(f.out, s(f.indent*4)+redb(msg))
}
}
func (f *pretty) printStep(step *gherkin.Step, def *StepDef, c colors.ColorFunc) {
text := s(f.indent*2) + c(strings.TrimSpace(step.Keyword)) + " "
switch {
case def != nil:
if m := def.Expr.FindStringSubmatchIndex(step.Text)[2:]; len(m) > 0 {
var pos, i int
for pos, i = 0, 0; i < len(m); i++ {
if m[i] == -1 {
continue // no index for this match
}
if math.Mod(float64(i), 2) == 0 {
text += c(step.Text[pos:m[i]])
} else {
text += colors.Bold(c)(step.Text[pos:m[i]])
}
pos = m[i]
}
text += c(step.Text[pos:])
} else {
text += c(step.Text)
}
text += s(f.commentPos-f.length(step)+1) + black(fmt.Sprintf("# %s", def.definitionID()))
default:
text += c(step.Text)
}
fmt.Fprintln(f.out, text)
switch t := step.Argument.(type) {
case *gherkin.DataTable:
f.printTable(t, c)
case *gherkin.DocString:
var ct string
if len(t.ContentType) > 0 {
ct = " " + c(t.ContentType)
}
fmt.Fprintln(f.out, s(f.indent*3)+c(t.Delimitter)+ct)
for _, ln := range strings.Split(t.Content, "\n") {
fmt.Fprintln(f.out, s(f.indent*3)+c(ln))
}
fmt.Fprintln(f.out, s(f.indent*3)+c(t.Delimitter))
}
}
func (f *pretty) printStepKind(res *stepResult) {
f.steps--
if f.outline != nil {
f.outlineSteps = append(f.outlineSteps, res)
}
var bgStep bool
// if the background has not been printed yet
switch {
// first background step
case f.bgSteps > 0 && f.bgSteps == len(f.feature.Background.Steps):
f.commentPos = f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background))
fmt.Fprintln(f.out, "\n"+s(f.indent)+whiteb(f.feature.Background.Keyword+": "+f.feature.Background.Name))
f.bgSteps--
bgStep = true
// subsequent background steps
case f.bgSteps > 0:
f.bgSteps--
bgStep = true
// first step of scenario, print header and calculate comment position
case f.scenario != nil:
// print scenario keyword and value if first example
if !f.scenarioKeyword {
f.commentPos = f.longestStep(f.scenario.Steps, f.length(f.scenario))
if f.feature.Background != nil {
if bgLen := f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background)); bgLen > f.commentPos {
f.commentPos = bgLen
}
}
text := s(f.indent) + whiteb(f.scenario.Keyword+": ") + f.scenario.Name
text += s(f.commentPos-f.length(f.scenario)+1) + f.line(f.scenario.Location)
fmt.Fprintln(f.out, "\n"+text)
f.scenarioKeyword = true
}
// first step of outline scenario, print header and calculate comment position
case f.outline != nil:
// print scenario keyword and value if first example
if !f.scenarioKeyword {
f.commentPos = f.longestStep(f.outline.Steps, f.length(f.outline))
if f.feature.Background != nil {
if bgLen := f.longestStep(f.feature.Background.Steps, f.length(f.feature.Background)); bgLen > f.commentPos {
f.commentPos = bgLen
}
}
text := s(f.indent) + whiteb(f.outline.Keyword+": ") + f.outline.Name
text += s(f.commentPos-f.length(f.outline)+1) + f.line(f.outline.Location)
fmt.Fprintln(f.out, "\n"+text)
f.scenarioKeyword = true
}
if len(f.outlineSteps) == len(f.outline.Steps)+f.totalBgSteps {
// all steps of an outline example have run
f.printOutlineExample(f.outline)
f.outlineNumExamples--
}
return
}
if !f.isBackgroundStep(res.step) || bgStep {
f.printStep(res.step, res.def, res.typ.clr())
}
if res.err != nil {
fmt.Fprintln(f.out, s(f.indent*2)+redb(fmt.Sprintf("%+v", res.err)))
}
if res.typ == pending {
fmt.Fprintln(f.out, s(f.indent*3)+yellow("TODO: write pending definition"))
}
}
func (f *pretty) isBackgroundStep(step *gherkin.Step) bool {
if f.feature.Background == nil {
return false
}
for _, bstep := range f.feature.Background.Steps {
if bstep.Location.Line == step.Location.Line {
return true
}
}
return false
}
// print table with aligned table cells
func (f *pretty) printTable(t *gherkin.DataTable, c colors.ColorFunc) {
var l = longest(t, c)
var cols = make([]string, len(t.Rows[0].Cells))
for _, row := range t.Rows {
for i, cell := range row.Cells {
val := c(cell.Value)
ln := utf8.RuneCountInString(val)
cols[i] = val + s(l[i]-ln)
}
fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cols, " | ")+" |")
}
}
func (f *pretty) Passed(step *gherkin.Step, match *StepDef) {
f.basefmt.Passed(step, match)
f.printStepKind(f.passed[len(f.passed)-1])
}
func (f *pretty) Skipped(step *gherkin.Step, match *StepDef) {
f.basefmt.Skipped(step, match)
f.printStepKind(f.skipped[len(f.skipped)-1])
}
func (f *pretty) Undefined(step *gherkin.Step, match *StepDef) {
f.basefmt.Undefined(step, match)
f.printStepKind(f.undefined[len(f.undefined)-1])
}
func (f *pretty) Failed(step *gherkin.Step, match *StepDef, err error) {
f.basefmt.Failed(step, match, err)
f.printStepKind(f.failed[len(f.failed)-1])
}
func (f *pretty) Pending(step *gherkin.Step, match *StepDef) {
f.basefmt.Pending(step, match)
f.printStepKind(f.pending[len(f.pending)-1])
}
// longest gives a list of the longest column widths across all rows in a table
func longest(tbl interface{}, clrs ...colors.ColorFunc) []int {
var rows []*gherkin.TableRow
switch t := tbl.(type) {
case *gherkin.Examples:
rows = append(rows, t.TableHeader)
rows = append(rows, t.TableBody...)
case *gherkin.DataTable:
rows = append(rows, t.Rows...)
}
longest := make([]int, len(rows[0].Cells))
for _, row := range rows {
for i, cell := range row.Cells {
for _, c := range clrs {
ln := utf8.RuneCountInString(c(cell.Value))
if longest[i] < ln {
longest[i] = ln
}
}
ln := utf8.RuneCountInString(cell.Value)
if longest[i] < ln {
longest[i] = ln
}
}
}
return longest
}
func (f *pretty) longestStep(steps []*gherkin.Step, base int) int {
ret := base
for _, step := range steps {
length := f.length(step)
if length > ret {
ret = length
}
}
return ret
}
// a line number representation in feature file
func (f *pretty) line(loc *gherkin.Location) string {
return black(fmt.Sprintf("# %s:%d", f.features[len(f.features)-1].Path, loc.Line))
}
func (f *pretty) length(node interface{}) int {
switch t := node.(type) {
case *gherkin.Background:
return f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+": "+t.Name)
case *gherkin.Step:
return f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+" "+t.Text)
case *gherkin.Scenario:
return f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+": "+t.Name)
case *gherkin.ScenarioOutline:
return f.indent + utf8.RuneCountInString(strings.TrimSpace(t.Keyword)+": "+t.Name)
}
panic(fmt.Sprintf("unexpected node %T to determine length", node))
}


@ -1,121 +0,0 @@
package godog
import (
"fmt"
"io"
"math"
"strings"
"sync"
"github.com/DATA-DOG/godog/gherkin"
)
func init() {
Format("progress", "Prints a character per step.", progressFunc)
}
func progressFunc(suite string, out io.Writer) Formatter {
return &progress{
basefmt: basefmt{
started: timeNowFunc(),
indent: 2,
out: out,
},
stepsPerRow: 70,
}
}
type progress struct {
basefmt
sync.Mutex
stepsPerRow int
steps int
}
func (f *progress) Node(n interface{}) {
f.Lock()
defer f.Unlock()
f.basefmt.Node(n)
}
func (f *progress) Feature(ft *gherkin.Feature, p string, c []byte) {
f.Lock()
defer f.Unlock()
f.basefmt.Feature(ft, p, c)
}
func (f *progress) Summary() {
left := math.Mod(float64(f.steps), float64(f.stepsPerRow))
if left != 0 {
if f.steps > f.stepsPerRow {
fmt.Fprintf(f.out, "%s %d\n", s(f.stepsPerRow-int(left)), f.steps)
} else {
fmt.Fprintf(f.out, " %d\n", f.steps)
}
}
fmt.Fprintln(f.out, "")
if len(f.failed) > 0 {
fmt.Fprintln(f.out, "\n--- "+red("Failed steps:")+"\n")
for _, fail := range f.failed {
fmt.Fprintln(f.out, s(2)+red(fail.scenarioDesc())+black(" # "+fail.scenarioLine()))
fmt.Fprintln(f.out, s(4)+red(strings.TrimSpace(fail.step.Keyword)+" "+fail.step.Text)+black(" # "+fail.line()))
fmt.Fprintln(f.out, s(6)+red("Error: ")+redb(fmt.Sprintf("%+v", fail.err))+"\n")
}
}
f.basefmt.Summary()
}
func (f *progress) step(res *stepResult) {
switch res.typ {
case passed:
fmt.Fprint(f.out, green("."))
case skipped:
fmt.Fprint(f.out, cyan("-"))
case failed:
fmt.Fprint(f.out, red("F"))
case undefined:
fmt.Fprint(f.out, yellow("U"))
case pending:
fmt.Fprint(f.out, yellow("P"))
}
f.steps++
if math.Mod(float64(f.steps), float64(f.stepsPerRow)) == 0 {
fmt.Fprintf(f.out, " %d\n", f.steps)
}
}
func (f *progress) Passed(step *gherkin.Step, match *StepDef) {
f.Lock()
defer f.Unlock()
f.basefmt.Passed(step, match)
f.step(f.passed[len(f.passed)-1])
}
func (f *progress) Skipped(step *gherkin.Step, match *StepDef) {
f.Lock()
defer f.Unlock()
f.basefmt.Skipped(step, match)
f.step(f.skipped[len(f.skipped)-1])
}
func (f *progress) Undefined(step *gherkin.Step, match *StepDef) {
f.Lock()
defer f.Unlock()
f.basefmt.Undefined(step, match)
f.step(f.undefined[len(f.undefined)-1])
}
func (f *progress) Failed(step *gherkin.Step, match *StepDef, err error) {
f.Lock()
defer f.Unlock()
f.basefmt.Failed(step, match, err)
f.step(f.failed[len(f.failed)-1])
}
func (f *progress) Pending(step *gherkin.Step, match *StepDef) {
f.Lock()
defer f.Unlock()
f.basefmt.Pending(step, match)
f.step(f.pending[len(f.pending)-1])
}
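
To make the stream concrete: with stepsPerRow = 70 as configured above, a run of 72 steps prints one character per step (. passed, - skipped, F failed, U undefined, P pending), a running count after every full row, and Summary pads the final partial row so the counts line up, roughly:

...................................................................... 70
.F                                                                     72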


@ -1,36 +0,0 @@
package godog
import "github.com/DATA-DOG/godog/gherkin"
// examples is a helper func to cast gherkin.Examples
// or gherkin.BaseExamples if it's empty
// @TODO: this should go away with gherkin update
func examples(ex interface{}) (*gherkin.Examples, bool) {
t, ok := ex.(*gherkin.Examples)
return t, ok
}
// isEmptyFeature means there are no scenarios or they do not have steps
func isEmptyFeature(ft *gherkin.Feature) bool {
for _, def := range ft.ScenarioDefinitions {
if !isEmptyScenario(def) {
return false
}
}
return true
}
// isEmptyScenario means the scenario does not have steps
func isEmptyScenario(def interface{}) bool {
switch t := def.(type) {
case *gherkin.Scenario:
if len(t.Steps) > 0 {
return false
}
case *gherkin.ScenarioOutline:
if len(t.Steps) > 0 {
return false
}
}
return true
}


@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014-2016 Cucumber Ltd, Gaspar Nagy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@ -1,3 +0,0 @@
[![Build Status](https://secure.travis-ci.org/cucumber/gherkin-go.svg)](http://travis-ci.org/cucumber/gherkin-go)
Gherkin parser/compiler for Go. Please see [Gherkin](https://github.com/cucumber/gherkin) for details.


@ -1,97 +0,0 @@
package gherkin
type Location struct {
Line int `json:"line"`
Column int `json:"column"`
}
type Node struct {
Location *Location `json:"location,omitempty"`
Type string `json:"type"`
}
type Feature struct {
Node
Tags []*Tag `json:"tags"`
Language string `json:"language,omitempty"`
Keyword string `json:"keyword"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
Background *Background `json:"background,omitempty"`
ScenarioDefinitions []interface{} `json:"scenarioDefinitions"`
Comments []*Comment `json:"comments"`
}
type Comment struct {
Node
Location *Location `json:"location,omitempty"`
Text string `json:"text"`
}
type Tag struct {
Node
Location *Location `json:"location,omitempty"`
Name string `json:"name"`
}
type Background struct {
ScenarioDefinition
}
type Scenario struct {
ScenarioDefinition
Tags []*Tag `json:"tags"`
}
type ScenarioOutline struct {
ScenarioDefinition
Tags []*Tag `json:"tags"`
Examples []*Examples `json:"examples,omitempty"`
}
type Examples struct {
Node
Tags []*Tag `json:"tags"`
Keyword string `json:"keyword"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
TableHeader *TableRow `json:"tableHeader"`
TableBody []*TableRow `json:"tableBody"`
}
type TableRow struct {
Node
Cells []*TableCell `json:"cells"`
}
type TableCell struct {
Node
Value string `json:"value"`
}
type ScenarioDefinition struct {
Node
Keyword string `json:"keyword"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
Steps []*Step `json:"steps"`
}
type Step struct {
Node
Keyword string `json:"keyword"`
Text string `json:"text"`
Argument interface{} `json:"argument,omitempty"`
}
type DocString struct {
Node
ContentType string `json:"contentType,omitempty"`
Content string `json:"content"`
Delimitter string `json:"-"`
}
type DataTable struct {
Node
Rows []*TableRow `json:"rows"`
}
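
As a hedged illustration of the json tags above, a standalone sketch (trimmed copies of Location, Node and Step; the values are made up) showing how the embedded Node flattens into the output:

package main

import (
	"encoding/json"
	"fmt"
)

type Location struct {
	Line   int `json:"line"`
	Column int `json:"column"`
}

type Node struct {
	Location *Location `json:"location,omitempty"`
	Type     string    `json:"type"`
}

type Step struct {
	Node
	Keyword  string      `json:"keyword"`
	Text     string      `json:"text"`
	Argument interface{} `json:"argument,omitempty"`
}

func main() {
	s := Step{
		Node:    Node{Location: &Location{Line: 5, Column: 5}, Type: "Step"},
		Keyword: "Given ",
		Text:    "I am in a directory",
	}
	b, _ := json.Marshal(s)
	// {"location":{"line":5,"column":5},"type":"Step","keyword":"Given ","text":"I am in a directory"}
	fmt.Println(string(b))
}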


@ -1,378 +0,0 @@
package gherkin
import (
"strings"
)
type AstBuilder interface {
Builder
GetFeature() *Feature
}
type astBuilder struct {
stack []*astNode
comments []*Comment
}
func (t *astBuilder) Reset() {
t.comments = []*Comment{}
t.stack = []*astNode{}
t.push(newAstNode(RuleType_None))
}
func (t *astBuilder) GetFeature() *Feature {
res := t.currentNode().getSingle(RuleType_Feature)
if val, ok := res.(*Feature); ok {
return val
}
return nil
}
type astNode struct {
ruleType RuleType
subNodes map[RuleType][]interface{}
}
func (a *astNode) add(rt RuleType, obj interface{}) {
a.subNodes[rt] = append(a.subNodes[rt], obj)
}
func (a *astNode) getSingle(rt RuleType) interface{} {
if val, ok := a.subNodes[rt]; ok {
for i := range val {
return val[i]
}
}
return nil
}
func (a *astNode) getItems(rt RuleType) []interface{} {
var res []interface{}
if val, ok := a.subNodes[rt]; ok {
for i := range val {
res = append(res, val[i])
}
}
return res
}
func (a *astNode) getToken(tt TokenType) *Token {
if val, ok := a.getSingle(tt.RuleType()).(*Token); ok {
return val
}
return nil
}
func (a *astNode) getTokens(tt TokenType) []*Token {
var items = a.getItems(tt.RuleType())
var tokens []*Token
for i := range items {
if val, ok := items[i].(*Token); ok {
tokens = append(tokens, val)
}
}
return tokens
}
func (t *astBuilder) currentNode() *astNode {
if len(t.stack) > 0 {
return t.stack[len(t.stack)-1]
}
return nil
}
func newAstNode(rt RuleType) *astNode {
return &astNode{
ruleType: rt,
subNodes: make(map[RuleType][]interface{}),
}
}
func NewAstBuilder() AstBuilder {
builder := new(astBuilder)
builder.comments = []*Comment{}
builder.push(newAstNode(RuleType_None))
return builder
}
func (t *astBuilder) push(n *astNode) {
t.stack = append(t.stack, n)
}
func (t *astBuilder) pop() *astNode {
x := t.stack[len(t.stack)-1]
t.stack = t.stack[:len(t.stack)-1]
return x
}
func (t *astBuilder) Build(tok *Token) (bool, error) {
if tok.Type == TokenType_Comment {
comment := new(Comment)
comment.Type = "Comment"
comment.Location = astLocation(tok)
comment.Text = tok.Text
t.comments = append(t.comments, comment)
} else {
t.currentNode().add(tok.Type.RuleType(), tok)
}
return true, nil
}
func (t *astBuilder) StartRule(r RuleType) (bool, error) {
t.push(newAstNode(r))
return true, nil
}
func (t *astBuilder) EndRule(r RuleType) (bool, error) {
node := t.pop()
transformedNode, err := t.transformNode(node)
t.currentNode().add(node.ruleType, transformedNode)
return true, err
}
func (t *astBuilder) transformNode(node *astNode) (interface{}, error) {
switch node.ruleType {
case RuleType_Step:
stepLine := node.getToken(TokenType_StepLine)
step := new(Step)
step.Type = "Step"
step.Location = astLocation(stepLine)
step.Keyword = stepLine.Keyword
step.Text = stepLine.Text
step.Argument = node.getSingle(RuleType_DataTable)
if step.Argument == nil {
step.Argument = node.getSingle(RuleType_DocString)
}
return step, nil
case RuleType_DocString:
separatorToken := node.getToken(TokenType_DocStringSeparator)
contentType := separatorToken.Text
lineTokens := node.getTokens(TokenType_Other)
var text string
for i := range lineTokens {
if i > 0 {
text += "\n"
}
text += lineTokens[i].Text
}
ds := new(DocString)
ds.Type = "DocString"
ds.Location = astLocation(separatorToken)
ds.ContentType = contentType
ds.Content = text
ds.Delimitter = DOCSTRING_SEPARATOR // TODO: remember separator
return ds, nil
case RuleType_DataTable:
rows, err := astTableRows(node)
dt := new(DataTable)
dt.Type = "DataTable"
dt.Location = rows[0].Location
dt.Rows = rows
return dt, err
case RuleType_Background:
backgroundLine := node.getToken(TokenType_BackgroundLine)
description, _ := node.getSingle(RuleType_Description).(string)
bg := new(Background)
bg.Type = "Background"
bg.Location = astLocation(backgroundLine)
bg.Keyword = backgroundLine.Keyword
bg.Name = backgroundLine.Text
bg.Description = description
bg.Steps = astSteps(node)
return bg, nil
case RuleType_Scenario_Definition:
tags := astTags(node)
scenarioNode, _ := node.getSingle(RuleType_Scenario).(*astNode)
if scenarioNode != nil {
scenarioLine := scenarioNode.getToken(TokenType_ScenarioLine)
description, _ := scenarioNode.getSingle(RuleType_Description).(string)
sc := new(Scenario)
sc.Type = "Scenario"
sc.Tags = tags
sc.Location = astLocation(scenarioLine)
sc.Keyword = scenarioLine.Keyword
sc.Name = scenarioLine.Text
sc.Description = description
sc.Steps = astSteps(scenarioNode)
return sc, nil
} else {
scenarioOutlineNode, ok := node.getSingle(RuleType_ScenarioOutline).(*astNode)
if !ok {
panic("Internal grammar error")
}
scenarioOutlineLine := scenarioOutlineNode.getToken(TokenType_ScenarioOutlineLine)
description, _ := scenarioOutlineNode.getSingle(RuleType_Description).(string)
sc := new(ScenarioOutline)
sc.Type = "ScenarioOutline"
sc.Tags = tags
sc.Location = astLocation(scenarioOutlineLine)
sc.Keyword = scenarioOutlineLine.Keyword
sc.Name = scenarioOutlineLine.Text
sc.Description = description
sc.Steps = astSteps(scenarioOutlineNode)
sc.Examples = astExamples(scenarioOutlineNode)
return sc, nil
}
case RuleType_Examples_Definition:
tags := astTags(node)
examplesNode, _ := node.getSingle(RuleType_Examples).(*astNode)
examplesLine := examplesNode.getToken(TokenType_ExamplesLine)
description, _ := examplesNode.getSingle(RuleType_Description).(string)
allRows, err := astTableRows(examplesNode)
ex := new(Examples)
ex.Type = "Examples"
ex.Tags = tags
ex.Location = astLocation(examplesLine)
ex.Keyword = examplesLine.Keyword
ex.Name = examplesLine.Text
ex.Description = description
ex.TableHeader = allRows[0]
ex.TableBody = allRows[1:]
return ex, err
case RuleType_Description:
lineTokens := node.getTokens(TokenType_Other)
// Trim trailing empty lines
end := len(lineTokens)
for end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == "" {
end--
}
var desc []string
for i := range lineTokens[0:end] {
desc = append(desc, lineTokens[i].Text)
}
return strings.Join(desc, "\n"), nil
case RuleType_Feature:
header, ok := node.getSingle(RuleType_Feature_Header).(*astNode)
if !ok {
return nil, nil
}
tags := astTags(header)
featureLine := header.getToken(TokenType_FeatureLine)
if featureLine == nil {
return nil, nil
}
background, _ := node.getSingle(RuleType_Background).(*Background)
scenarioDefinitions := node.getItems(RuleType_Scenario_Definition)
if scenarioDefinitions == nil {
scenarioDefinitions = []interface{}{}
}
description, _ := header.getSingle(RuleType_Description).(string)
feat := new(Feature)
feat.Type = "Feature"
feat.Tags = tags
feat.Location = astLocation(featureLine)
feat.Language = featureLine.GherkinDialect
feat.Keyword = featureLine.Keyword
feat.Name = featureLine.Text
feat.Description = description
feat.Background = background
feat.ScenarioDefinitions = scenarioDefinitions
feat.Comments = t.comments
return feat, nil
}
return node, nil
}
func astLocation(t *Token) *Location {
return &Location{
Line: t.Location.Line,
Column: t.Location.Column,
}
}
func astTableRows(t *astNode) (rows []*TableRow, err error) {
rows = []*TableRow{}
tokens := t.getTokens(TokenType_TableRow)
for i := range tokens {
row := new(TableRow)
row.Type = "TableRow"
row.Location = astLocation(tokens[i])
row.Cells = astTableCells(tokens[i])
rows = append(rows, row)
}
err = ensureCellCount(rows)
return
}
func ensureCellCount(rows []*TableRow) error {
if len(rows) <= 1 {
return nil
}
cellCount := len(rows[0].Cells)
for i := range rows {
if cellCount != len(rows[i].Cells) {
return &parseError{"inconsistent cell count within the table", &Location{
Line: rows[i].Location.Line,
Column: rows[i].Location.Column,
}}
}
}
return nil
}
func astTableCells(t *Token) (cells []*TableCell) {
cells = []*TableCell{}
for i := range t.Items {
item := t.Items[i]
cell := new(TableCell)
cell.Type = "TableCell"
cell.Location = &Location{
Line: t.Location.Line,
Column: item.Column,
}
cell.Value = item.Text
cells = append(cells, cell)
}
return
}
func astSteps(t *astNode) (steps []*Step) {
steps = []*Step{}
tokens := t.getItems(RuleType_Step)
for i := range tokens {
step, _ := tokens[i].(*Step)
steps = append(steps, step)
}
return
}
func astExamples(t *astNode) (examples []*Examples) {
examples = []*Examples{}
tokens := t.getItems(RuleType_Examples_Definition)
for i := range tokens {
example, _ := tokens[i].(*Examples)
examples = append(examples, example)
}
return
}
func astTags(node *astNode) (tags []*Tag) {
tags = []*Tag{}
tagsNode, ok := node.getSingle(RuleType_Tags).(*astNode)
if !ok {
return
}
tokens := tagsNode.getTokens(TokenType_TagLine)
for i := range tokens {
token := tokens[i]
for k := range token.Items {
item := token.Items[k]
tag := new(Tag)
tag.Type = "Tag"
tag.Location = &Location{
Line: token.Location.Line,
Column: item.Column,
}
tag.Name = item.Text
tags = append(tags, tag)
}
}
return
}
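
To trace the Builder contract above on a small document: the parser opens a grammar rule with StartRule, feeds each token to Build on the current node, and closes the rule with EndRule; every EndRule pops a node, transformNode folds it into a typed AST value (Step, Background, Scenario, Examples, Feature, ...), and the result is attached to the parent node, so GetFeature ultimately finds the finished *Feature stored under RuleType_Feature.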


@ -1,47 +0,0 @@
package gherkin
type GherkinDialect struct {
Language string
Name string
Native string
Keywords map[string][]string
}
func (g *GherkinDialect) FeatureKeywords() []string {
return g.Keywords["feature"]
}
func (g *GherkinDialect) ScenarioKeywords() []string {
return g.Keywords["scenario"]
}
func (g *GherkinDialect) StepKeywords() []string {
result := g.Keywords["given"]
result = append(result, g.Keywords["when"]...)
result = append(result, g.Keywords["then"]...)
result = append(result, g.Keywords["and"]...)
result = append(result, g.Keywords["but"]...)
return result
}
func (g *GherkinDialect) BackgroundKeywords() []string {
return g.Keywords["background"]
}
func (g *GherkinDialect) ScenarioOutlineKeywords() []string {
return g.Keywords["scenarioOutline"]
}
func (g *GherkinDialect) ExamplesKeywords() []string {
return g.Keywords["examples"]
}
type GherkinDialectProvider interface {
GetDialect(language string) *GherkinDialect
}
type gherkinDialectMap map[string]*GherkinDialect
func (g gherkinDialectMap) GetDialect(language string) *GherkinDialect {
return g[language]
}

File diff suppressed because it is too large.


@ -1,137 +0,0 @@
package gherkin
import (
"bufio"
"fmt"
"io"
"strings"
)
type Parser interface {
StopAtFirstError(b bool)
Parse(s Scanner, m Matcher) (err error)
}
/*
The scanner reads a gherkin doc (typically read from a .feature file) and creates a token for
each line. The tokens are passed to the parser, which outputs an AST (Abstract Syntax Tree).
If the scanner sees a # language header, it will reconfigure itself dynamically to look for
Gherkin keywords for the associated language. The keywords are defined in gherkin-languages.json.
*/
type Scanner interface {
Scan() (line *Line, atEof bool, err error)
}
type Builder interface {
Build(*Token) (bool, error)
StartRule(RuleType) (bool, error)
EndRule(RuleType) (bool, error)
Reset()
}
type Token struct {
Type TokenType
Keyword string
Text string
Items []*LineSpan
GherkinDialect string
Indent string
Location *Location
}
func (t *Token) IsEOF() bool {
return t.Type == TokenType_EOF
}
func (t *Token) String() string {
return fmt.Sprintf("%s: %s/%s", t.Type.Name(), t.Keyword, t.Text)
}
type LineSpan struct {
Column int
Text string
}
func (l *LineSpan) String() string {
return fmt.Sprintf("%d:%s", l.Column, l.Text)
}
type parser struct {
builder Builder
stopAtFirstError bool
}
func NewParser(b Builder) Parser {
return &parser{
builder: b,
}
}
func (p *parser) StopAtFirstError(b bool) {
p.stopAtFirstError = b
}
func NewScanner(r io.Reader) Scanner {
return &scanner{
s: bufio.NewScanner(r),
line: 0,
}
}
type scanner struct {
s *bufio.Scanner
line int
}
func (t *scanner) Scan() (line *Line, atEof bool, err error) {
scanning := t.s.Scan()
if !scanning {
err = t.s.Err()
if err == nil {
atEof = true
}
}
if err == nil {
t.line++
str := t.s.Text()
line = &Line{str, t.line, strings.TrimLeft(str, " \t"), atEof}
}
return
}
type Line struct {
LineText string
LineNumber int
TrimmedLineText string
AtEof bool
}
func (g *Line) Indent() int {
return len(g.LineText) - len(g.TrimmedLineText)
}
func (g *Line) IsEmpty() bool {
return len(g.TrimmedLineText) == 0
}
func (g *Line) IsEof() bool {
return g.AtEof
}
func (g *Line) StartsWith(prefix string) bool {
return strings.HasPrefix(g.TrimmedLineText, prefix)
}
func ParseFeature(in io.Reader) (feature *Feature, err error) {
builder := NewAstBuilder()
parser := NewParser(builder)
parser.StopAtFirstError(false)
matcher := NewMatcher(GherkinDialectsBuildin())
scanner := NewScanner(in)
err = parser.Parse(scanner, matcher)
return builder.GetFeature(), err
}
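
A minimal usage sketch of the ParseFeature entry point defined above (the feature text is illustrative):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/DATA-DOG/godog/gherkin"
)

func main() {
	src := `Feature: ls
  Scenario: list contents
    Given I am in a directory "test"
    When I run ls`

	// ParseFeature wires the scanner, matcher and AST builder
	// together and returns the parsed feature
	feat, err := gherkin.ParseFeature(strings.NewReader(src))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(feat.Name, len(feat.ScenarioDefinitions)) // ls 1
}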


@ -1,270 +0,0 @@
package gherkin
import (
"regexp"
"strings"
"unicode/utf8"
)
const (
DEFAULT_DIALECT = "en"
COMMENT_PREFIX = "#"
TAG_PREFIX = "@"
TITLE_KEYWORD_SEPARATOR = ":"
TABLE_CELL_SEPARATOR = '|'
ESCAPE_CHAR = '\\'
ESCAPED_NEWLINE = 'n'
DOCSTRING_SEPARATOR = "\"\"\""
DOCSTRING_ALTERNATIVE_SEPARATOR = "```"
)
type matcher struct {
gdp GherkinDialectProvider
default_lang string
lang string
dialect *GherkinDialect
activeDocStringSeparator string
indentToRemove int
languagePattern *regexp.Regexp
}
func NewMatcher(gdp GherkinDialectProvider) Matcher {
return &matcher{
gdp: gdp,
default_lang: DEFAULT_DIALECT,
lang: DEFAULT_DIALECT,
dialect: gdp.GetDialect(DEFAULT_DIALECT),
languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"),
}
}
func NewLanguageMatcher(gdp GherkinDialectProvider, language string) Matcher {
return &matcher{
gdp: gdp,
default_lang: language,
lang: language,
dialect: gdp.GetDialect(language),
languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"),
}
}
func (m *matcher) Reset() {
m.indentToRemove = 0
m.activeDocStringSeparator = ""
if m.lang != "en" {
m.dialect = m.gdp.GetDialect(m.default_lang)
m.lang = "en"
}
}
func (m *matcher) newTokenAtLocation(line, index int) (token *Token) {
column := index + 1
token = new(Token)
token.GherkinDialect = m.lang
token.Location = &Location{line, column}
return
}
func (m *matcher) MatchEOF(line *Line) (ok bool, token *Token, err error) {
if line.IsEof() {
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_EOF
}
return
}
func (m *matcher) MatchEmpty(line *Line) (ok bool, token *Token, err error) {
if line.IsEmpty() {
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_Empty
}
return
}
func (m *matcher) MatchComment(line *Line) (ok bool, token *Token, err error) {
if line.StartsWith(COMMENT_PREFIX) {
token, ok = m.newTokenAtLocation(line.LineNumber, 0), true
token.Type = TokenType_Comment
token.Text = line.LineText
}
return
}
func (m *matcher) MatchTagLine(line *Line) (ok bool, token *Token, err error) {
if line.StartsWith(TAG_PREFIX) {
var tags []*LineSpan
var column = line.Indent()
splits := strings.Split(line.TrimmedLineText, TAG_PREFIX)
for i := range splits {
txt := strings.Trim(splits[i], " ")
if txt != "" {
tags = append(tags, &LineSpan{column, TAG_PREFIX + txt})
}
column = column + len(splits[i]) + 1
}
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_TagLine
token.Items = tags
}
return
}
func (m *matcher) matchTitleLine(line *Line, tokenType TokenType, keywords []string) (ok bool, token *Token, err error) {
for i := range keywords {
keyword := keywords[i]
if line.StartsWith(keyword + TITLE_KEYWORD_SEPARATOR) {
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = tokenType
token.Keyword = keyword
token.Text = strings.Trim(line.TrimmedLineText[len(keyword)+1:], " ")
return
}
}
return
}
func (m *matcher) MatchFeatureLine(line *Line) (ok bool, token *Token, err error) {
return m.matchTitleLine(line, TokenType_FeatureLine, m.dialect.FeatureKeywords())
}
func (m *matcher) MatchBackgroundLine(line *Line) (ok bool, token *Token, err error) {
return m.matchTitleLine(line, TokenType_BackgroundLine, m.dialect.BackgroundKeywords())
}
func (m *matcher) MatchScenarioLine(line *Line) (ok bool, token *Token, err error) {
return m.matchTitleLine(line, TokenType_ScenarioLine, m.dialect.ScenarioKeywords())
}
func (m *matcher) MatchScenarioOutlineLine(line *Line) (ok bool, token *Token, err error) {
return m.matchTitleLine(line, TokenType_ScenarioOutlineLine, m.dialect.ScenarioOutlineKeywords())
}
func (m *matcher) MatchExamplesLine(line *Line) (ok bool, token *Token, err error) {
return m.matchTitleLine(line, TokenType_ExamplesLine, m.dialect.ExamplesKeywords())
}
func (m *matcher) MatchStepLine(line *Line) (ok bool, token *Token, err error) {
keywords := m.dialect.StepKeywords()
for i := range keywords {
keyword := keywords[i]
if line.StartsWith(keyword) {
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_StepLine
token.Keyword = keyword
token.Text = strings.Trim(line.TrimmedLineText[len(keyword):], " ")
return
}
}
return
}
func (m *matcher) MatchDocStringSeparator(line *Line) (ok bool, token *Token, err error) {
if m.activeDocStringSeparator != "" {
if line.StartsWith(m.activeDocStringSeparator) {
// close
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_DocStringSeparator
m.indentToRemove = 0
m.activeDocStringSeparator = ""
}
return
}
if line.StartsWith(DOCSTRING_SEPARATOR) {
m.activeDocStringSeparator = DOCSTRING_SEPARATOR
} else if line.StartsWith(DOCSTRING_ALTERNATIVE_SEPARATOR) {
m.activeDocStringSeparator = DOCSTRING_ALTERNATIVE_SEPARATOR
}
if m.activeDocStringSeparator != "" {
// open
contentType := line.TrimmedLineText[len(m.activeDocStringSeparator):]
m.indentToRemove = line.Indent()
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_DocStringSeparator
token.Text = contentType
}
return
}
func (m *matcher) MatchTableRow(line *Line) (ok bool, token *Token, err error) {
var firstChar, firstPos = utf8.DecodeRuneInString(line.TrimmedLineText)
if firstChar == TABLE_CELL_SEPARATOR {
var cells []*LineSpan
var cell []rune
var startCol = line.Indent() + 2 // column where the current cell started
// start after the first separator, it's not included in the cell
for i, w, col := firstPos, 0, startCol; i < len(line.TrimmedLineText); i += w {
var char rune
char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:])
if char == TABLE_CELL_SEPARATOR {
// append current cell
txt := string(cell)
txtTrimmed := strings.TrimLeft(txt, " ")
ind := len(txt) - len(txtTrimmed)
cells = append(cells, &LineSpan{startCol + ind, strings.TrimRight(txtTrimmed, " ")})
// start building next
cell = make([]rune, 0)
startCol = col + 1
} else if char == ESCAPE_CHAR {
// skip this character but count the column
i += w
col++
char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:])
if char == ESCAPED_NEWLINE {
cell = append(cell, '\n')
} else {
if char != TABLE_CELL_SEPARATOR && char != ESCAPE_CHAR {
cell = append(cell, ESCAPE_CHAR)
}
cell = append(cell, char)
}
} else {
cell = append(cell, char)
}
col++
}
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_TableRow
token.Items = cells
}
return
}
func (m *matcher) MatchLanguage(line *Line) (ok bool, token *Token, err error) {
matches := m.languagePattern.FindStringSubmatch(line.TrimmedLineText)
if len(matches) > 0 {
lang := matches[1]
token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true
token.Type = TokenType_Language
token.Text = lang
dialect := m.gdp.GetDialect(lang)
if dialect == nil {
err = &parseError{"Language not supported: " + lang, token.Location}
} else {
m.lang = lang
m.dialect = dialect
}
}
return
}
func (m *matcher) MatchOther(line *Line) (ok bool, token *Token, err error) {
token, ok = m.newTokenAtLocation(line.LineNumber, 0), true
token.Type = TokenType_Other
element := line.LineText
txt := strings.TrimLeft(element, " ")
if len(element)-len(txt) > m.indentToRemove {
token.Text = m.unescapeDocString(element[m.indentToRemove:])
} else {
token.Text = m.unescapeDocString(txt)
}
return
}
func (m *matcher) unescapeDocString(text string) string {
if m.activeDocStringSeparator != "" {
return strings.Replace(text, "\\\"\\\"\\\"", "\"\"\"", -1)
} else {
return text
}
}
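
Two worked examples of the matching rules above: a line "# language: fr" matches languagePattern, so MatchLanguage switches the active dialect (or reports "Language not supported" when the dialect provider has no such entry); and in MatchTableRow the escape handling decodes the cell text a\nb with a real newline, reads c\|d as c|d, and keeps the backslash for any other escaped character.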

File diff suppressed because it is too large.


@ -1,42 +0,0 @@
/*
Package godog is the official Cucumber BDD framework for Golang; it merges specification
and test documentation into one cohesive whole.
Godog does not interfere with the standard "go test" command or its behavior.
You can leverage both frameworks to functionally test your application while
maintaining all test-related source code in *_test.go files.
Godog acts similarly to the go test command. It uses the go
compiler and linker tool in order to produce a test executable. Godog
contexts need to be exported the same as Test functions for go test.
For example, imagine you're about to create the famous UNIX ls command.
Before you begin, you describe how the feature should work, see the example below.
Example:
Feature: ls
In order to see the directory structure
As a UNIX user
I need to be able to list the current directory's contents
Scenario:
Given I am in a directory "test"
And I have a file named "foo"
And I have a file named "bar"
When I run ls
Then I should get output:
"""
bar
foo
"""
Now, wouldn't it be cool if something could read this sentence and use it to actually
run a test against the ls command? Hey, that's exactly what this package does!
As you'll see, Godog is easy to learn, quick to use, and will put the fun back into tests.
Godog was inspired by Behat and Cucumber; the above description is taken from their documentation.
*/
package godog
// Version of package - based on Semantic Versioning 2.0.0 http://semver.org/
const Version = "v0.7.6"

Binary file not shown (image, 47 KiB).


@ -1,59 +0,0 @@
package godog
import (
"io"
)
// Options are suite run options;
// flags are mapped to these options.
//
// It can also be used together with godog.RunWithOptions
// to run the test suite from go source directly.
//
// See the flags for more details.
type Options struct {
// Print step definitions found and exit
ShowStepDefinitions bool
// Randomize, if not `0`, will be used to run scenarios in a random order.
//
// Randomizing scenario order is especially helpful for detecting
// situations where you have state leaking between scenarios, which can
// cause flickering or fragile tests.
//
// The default value of `0` means "do not randomize".
//
// The magic value of `-1` means "pick a random seed for me", and godog will
// assign a seed on its own during the `RunWithOptions` phase, similar to if
// you specified `--random` on the command line.
//
// Any other value will be used as the random seed for shuffling. Re-using the
// same seed will allow you to reproduce the shuffle order of a previous run
// to isolate an error condition.
Randomize int64
// Stops on the first failure
StopOnFailure bool
// Fail suite when there are pending or undefined steps
Strict bool
// Forces ansi color stripping
NoColors bool
// Various filters for scenarios parsed
// from feature files
Tags string
// The formatter name
Format string
// Concurrency rate, not all formatters accept this
Concurrency int
// All feature file paths
Paths []string
// Where it should print formatter output
Output io.Writer
}
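
A hedged usage sketch pairing these Options with RunWithOptions from the runner below; the suite name and paths are illustrative:

package main

import (
	"os"

	"github.com/DATA-DOG/godog"
)

func main() {
	opts := godog.Options{
		Format:        "progress",           // one character per step
		Paths:         []string{"features"}, // feature file locations
		Randomize:     -1,                   // pick a random seed, per the field comment above
		StopOnFailure: true,
		Output:        os.Stdout,
	}
	status := godog.RunWithOptions("godogs", func(s *godog.Suite) {
		// step definitions would be registered here
	}, opts)
	os.Exit(status)
}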


@ -1,232 +0,0 @@
package godog
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/DATA-DOG/godog/colors"
)
const (
exitSuccess int = iota
exitFailure
exitOptionError
)
type initializer func(*Suite)
type runner struct {
randomSeed int64
stopOnFailure, strict bool
features []*feature
fmt Formatter
initializer initializer
}
func (r *runner) concurrent(rate int) (failed bool) {
queue := make(chan int, rate)
for i, ft := range r.features {
queue <- i // reserve space in queue
go func(fail *bool, feat *feature) {
defer func() {
<-queue // free a space in queue
}()
if r.stopOnFailure && *fail {
return
}
suite := &Suite{
fmt: r.fmt,
randomSeed: r.randomSeed,
stopOnFailure: r.stopOnFailure,
strict: r.strict,
features: []*feature{feat},
}
r.initializer(suite)
suite.run()
if suite.failed {
*fail = true
}
}(&failed, ft)
}
// wait until the last ones are processed
for i := 0; i < rate; i++ {
queue <- i
}
close(queue)
// print summary
r.fmt.Summary()
return
}
func (r *runner) run() bool {
suite := &Suite{
fmt: r.fmt,
randomSeed: r.randomSeed,
stopOnFailure: r.stopOnFailure,
strict: r.strict,
features: r.features,
}
r.initializer(suite)
suite.run()
r.fmt.Summary()
return suite.failed
}
// RunWithOptions is the same as the Run function, except
// it uses the provided Options in order to run the
// test suite without parsing flags.
//
// This method is useful if you run
// godog, for example, in a TestMain function together
// with go tests.
//
// The exit codes are:
// 0 - success
// 1 - failed
// 2 - command line usage error
// 128 - or higher, os signal related error exit codes
//
// If there are flag related errors they will
// be directed to os.Stderr
func RunWithOptions(suite string, contextInitializer func(suite *Suite), opt Options) int {
var output io.Writer = os.Stdout
if nil != opt.Output {
output = opt.Output
}
if opt.NoColors {
output = colors.Uncolored(output)
} else {
output = colors.Colored(output)
}
if opt.ShowStepDefinitions {
s := &Suite{}
contextInitializer(s)
s.printStepDefinitions(output)
return exitOptionError
}
if len(opt.Paths) == 0 {
inf, err := os.Stat("features")
if err == nil && inf.IsDir() {
opt.Paths = []string{"features"}
}
}
if opt.Concurrency > 1 && !supportsConcurrency(opt.Format) {
fmt.Fprintln(os.Stderr, fmt.Errorf("format \"%s\" does not support concurrent execution", opt.Format))
return exitOptionError
}
formatter := findFmt(opt.Format)
if nil == formatter {
var names []string
for name := range AvailableFormatters() {
names = append(names, name)
}
fmt.Fprintln(os.Stderr, fmt.Errorf(
`unregistered formatter name: "%s", use one of: %s`,
opt.Format,
strings.Join(names, ", "),
))
return exitOptionError
}
features, err := parseFeatures(opt.Tags, opt.Paths)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return exitOptionError
}
r := runner{
fmt: formatter(suite, output),
initializer: contextInitializer,
features: features,
randomSeed: opt.Randomize,
stopOnFailure: opt.StopOnFailure,
strict: opt.Strict,
}
// store the chosen seed in the environment, so it can be seen in the formatter summary report
os.Setenv("GODOG_SEED", strconv.FormatInt(r.randomSeed, 10))
// determine tested package
_, filename, _, _ := runtime.Caller(1)
os.Setenv("GODOG_TESTED_PACKAGE", runsFromPackage(filename))
var failed bool
if opt.Concurrency > 1 {
failed = r.concurrent(opt.Concurrency)
} else {
failed = r.run()
}
if failed && opt.Format != "events" {
return exitFailure
}
return exitSuccess
}
func runsFromPackage(fp string) string {
dir := filepath.Dir(fp)
for _, gp := range gopaths {
gp = filepath.Join(gp, "src")
if strings.Index(dir, gp) == 0 {
return strings.TrimLeft(strings.Replace(dir, gp, "", 1), string(filepath.Separator))
}
}
return dir
}
// Run creates and runs the feature suite.
// It reads all configuration options from flags and
// uses contextInitializer to register contexts.
//
// The concurrency option allows the runner to
// initialize a number of suites to be run
// separately. Only the progress formatter
// is supported when the concurrency level is
// higher than 1.
//
// contextInitializer must be able to register
// the step definitions and event handlers.
//
// The exit codes are:
// 0 - success
// 1 - failed
// 2 - command line usage error
// 128 - or higher, os signal related error exit codes
//
// If there are flag related errors they will
// be directed to os.Stderr
func Run(suite string, contextInitializer func(suite *Suite)) int {
var opt Options
opt.Output = colors.Colored(os.Stdout)
flagSet := FlagSet(&opt)
if err := flagSet.Parse(os.Args[1:]); err != nil {
fmt.Fprintln(os.Stderr, err)
return exitOptionError
}
opt.Paths = flagSet.Args()
return RunWithOptions(suite, contextInitializer, opt)
}
func supportsConcurrency(format string) bool {
switch format {
case "events":
case "junit":
case "pretty":
case "cucumber":
default:
return true // supports concurrency
}
return false // does not support concurrency
}
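
And the TestMain pattern that the RunWithOptions comment alludes to, sketched under the same assumptions:

package main_test

import (
	"os"
	"testing"

	"github.com/DATA-DOG/godog"
)

func TestMain(m *testing.M) {
	status := godog.RunWithOptions("godogs", func(s *godog.Suite) {
		// register step definitions and event handlers here
	}, godog.Options{Format: "progress", Paths: []string{"features"}})

	// run the regular go tests as well and keep the worst exit code
	if st := m.Run(); st > status {
		status = st
	}
	os.Exit(status)
}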


@ -1,141 +0,0 @@
package godog
import (
"fmt"
"go/build"
"io"
"path"
"path/filepath"
"runtime"
"strings"
)
// stackFrame represents a program counter inside a stack frame.
type stackFrame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f stackFrame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this frame's pc.
func (f stackFrame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
func trimGoPath(file string) string {
for _, p := range filepath.SplitList(build.Default.GOPATH) {
file = strings.Replace(file, filepath.Join(p, "src")+string(filepath.Separator), "", 1)
}
return file
}
// line returns the line number of source code of the
// function for this frame's pc.
func (f stackFrame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f stackFrame) Format(s fmt.State, verb rune) {
funcname := func(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), trimGoPath(file))
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := stackFrame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func callStack() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// traceError is an error that has a message and a stack, but no caller.
type traceError struct {
msg string
*stack
}
func (f *traceError) Error() string { return f.msg }
func (f *traceError) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
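
In practice this means fmt.Sprintf("%s", err) on a traceError yields just the message, while "%+v" appends one "function" plus "file:line" frame per recorded program counter, which is what the pretty and progress formatters above print for failed steps.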


@ -1,211 +0,0 @@
package godog
import (
"fmt"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/DATA-DOG/godog/gherkin"
)
var matchFuncDefRef = regexp.MustCompile(`\(([^\)]+)\)`)
// Steps allows nesting steps:
// instead of returning an error from a step func,
// it is possible to return combined steps:
//
// func multistep(name string) godog.Steps {
// return godog.Steps{
// fmt.Sprintf(`an user named "%s"`, name),
// fmt.Sprintf(`user "%s" is authenticated`, name),
// }
// }
//
// These steps will be matched and executed in
// sequential order. The first one that fails
// will result in the main step's failure.
type Steps []string
// StepDef is a registered step definition.
// It contains a step handler and the regexp which
// is used to match a step, along with the args
// that were matched by the last executed step.
//
// This structure is passed to the formatter
// when a step is matched and has either failed
// or succeeded.
type StepDef struct {
args []interface{}
hv reflect.Value
Expr *regexp.Regexp
Handler interface{}
// multistep related
nested bool
undefined []string
}
func (sd *StepDef) definitionID() string {
ptr := sd.hv.Pointer()
f := runtime.FuncForPC(ptr)
file, line := f.FileLine(ptr)
dir := filepath.Dir(file)
fn := strings.Replace(f.Name(), dir, "", -1)
var parts []string
for _, gr := range matchFuncDefRef.FindAllStringSubmatch(fn, -1) {
parts = append(parts, strings.Trim(gr[1], "_."))
}
if len(parts) > 0 {
// case when suite is a structure with methods
fn = strings.Join(parts, ".")
} else {
// case when steps are just plain funcs
fn = strings.Trim(fn, "_.")
}
if pkg := os.Getenv("GODOG_TESTED_PACKAGE"); len(pkg) > 0 {
fn = strings.Replace(fn, pkg, "", 1)
fn = strings.TrimLeft(fn, ".")
fn = strings.Replace(fn, "..", ".", -1)
}
return fmt.Sprintf("%s:%d -> %s", filepath.Base(file), line, fn)
}
// run a step with the matched arguments using
// reflect
func (sd *StepDef) run() interface{} {
typ := sd.hv.Type()
if len(sd.args) < typ.NumIn() {
return fmt.Errorf("func expects %d arguments, which is more than %d matched from step", typ.NumIn(), len(sd.args))
}
var values []reflect.Value
for i := 0; i < typ.NumIn(); i++ {
param := typ.In(i)
switch param.Kind() {
case reflect.Int:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseInt(s, 10, 0)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to int: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(int(v)))
case reflect.Int64:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to int64: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(int64(v)))
case reflect.Int32:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to int32: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(int32(v)))
case reflect.Int16:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseInt(s, 10, 16)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to int16: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(int16(v)))
case reflect.Int8:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseInt(s, 10, 8)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to int8: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(int8(v)))
case reflect.String:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
values = append(values, reflect.ValueOf(s))
case reflect.Float64:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to float64: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(v))
case reflect.Float32:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
v, err := strconv.ParseFloat(s, 32)
if err != nil {
return fmt.Errorf(`cannot convert argument %d: "%s" to float32: %s`, i, s, err)
}
values = append(values, reflect.ValueOf(float32(v)))
case reflect.Ptr:
arg := sd.args[i]
switch param.Elem().String() {
case "gherkin.DocString":
v, ok := arg.(*gherkin.DocString)
if !ok {
return fmt.Errorf(`cannot convert argument %d: "%v" of type "%T" to *gherkin.DocString`, i, arg, arg)
}
values = append(values, reflect.ValueOf(v))
case "gherkin.DataTable":
v, ok := arg.(*gherkin.DataTable)
if !ok {
return fmt.Errorf(`cannot convert argument %d: "%v" of type "%T" to *gherkin.DataTable`, i, arg, arg)
}
values = append(values, reflect.ValueOf(v))
default:
return fmt.Errorf("the argument %d type %T is not supported", i, arg)
}
case reflect.Slice:
switch param {
case typeOfBytes:
s, err := sd.shouldBeString(i)
if err != nil {
return err
}
values = append(values, reflect.ValueOf([]byte(s)))
default:
return fmt.Errorf("the slice argument %d type %s is not supported", i, param.Kind())
}
default:
return fmt.Errorf("the argument %d type %s is not supported", i, param.Kind())
}
}
return sd.hv.Call(values)[0].Interface()
}
func (sd *StepDef) shouldBeString(idx int) (string, error) {
arg := sd.args[idx]
s, ok := arg.(string)
if !ok {
return "", fmt.Errorf(`cannot convert argument %d: "%v" of type "%T" to string`, idx, arg, arg)
}
return s, nil
}
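
A sketch of a step definition that exercises the conversions in run above. Suite.Step is godog's registration API, assumed here since it is not part of this diff; the regexp capture groups become sd.args:

package main

import (
	"fmt"

	"github.com/DATA-DOG/godog"
	"github.com/DATA-DOG/godog/gherkin"
)

// the captured "(\d+)" arrives as a string and is converted to int via
// strconv.ParseInt; the DocString pointer is passed through from the
// step's doc string block
func iHaveCukesWithNote(count int, note *gherkin.DocString) error {
	if count < 0 {
		return fmt.Errorf("cannot have %d cukes", count)
	}
	fmt.Println("note:", note.Content)
	return nil
}

func FeatureContext(s *godog.Suite) {
	s.Step(`^I have (\d+) cukes with note:$`, iHaveCukesWithNote)
}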

Some files were not shown because too many files have changed in this diff.