commit e99af2b7a1
Merge pull request #1607 from XiShanYongYe-Chang/upgrade-ginkgo-to-v2

    [E2E] Upgrade ginkgo to v2
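Ginkgo v2 follows Go semantic import versioning: the module path and every import gain a /v2 suffix while the package name stays `ginkgo`, and the CLI's camelCase flags become kebab-case (the script hunk below renames -failFast to -fail-fast). A minimal sketch of what the per-file change amounts to — the spec itself is hypothetical, only the import paths are from this PR:

```go
package e2e

import (
	// was: "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/v2" // the package is still named "ginkgo"
	"github.com/onsi/gomega"
)

// The Describe/It DSL is unchanged between v1 and v2 for cases like this.
var _ = ginkgo.Describe("a migrated spec", func() {
	ginkgo.It("runs as before", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})
```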

go.mod | 5
@@ -9,7 +9,7 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/uuid v1.1.2
 	github.com/kr/pretty v0.3.0
-	github.com/onsi/ginkgo v1.16.5
+	github.com/onsi/ginkgo/v2 v2.1.3
 	github.com/onsi/gomega v1.17.0
 	github.com/prometheus/client_golang v1.11.0
 	github.com/spf13/cobra v1.3.0
@@ -75,6 +75,7 @@ require (
 	github.com/google/btree v1.0.1 // indirect
 	github.com/google/go-cmp v0.5.6 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
 	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
@@ -101,7 +102,6 @@ require (
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/pelletier/go-toml v1.9.4 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
@@ -152,7 +152,6 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.3 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 	k8s.io/cluster-bootstrap v0.23.4 // indirect

go.sum | 4
@@ -426,8 +426,10 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -650,6 +652,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=

@@ -27,7 +27,7 @@ ARTIFACTS_PATH=${ARTIFACTS_PATH:-"${HOME}/karmada-e2e-logs"}
 mkdir -p "$ARTIFACTS_PATH"
 
 # Install ginkgo
-GO111MODULE=on go install github.com/onsi/ginkgo/ginkgo
+GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo
 
 # Pre run e2e for install extra conponents
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
@@ -38,7 +38,7 @@ export KUBECONFIG=${KARMADA_APISERVER_KUBECONFIG}
 export PULL_BASED_CLUSTERS=${PULL_BASED_CLUSTERS}
 
 set +e
-ginkgo -v -race -failFast ./test/e2e/
+ginkgo -v -race -fail-fast ./test/e2e/
 TESTING_RESULT=$?
 
 # Collect logs

@@ -5,7 +5,7 @@ package tools
 
 import (
 	_ "github.com/gogo/protobuf/protoc-gen-gogo"
-	_ "github.com/onsi/ginkgo/ginkgo"
+	_ "github.com/onsi/ginkgo/v2/ginkgo"
 	_ "github.com/vektra/mockery/v2"
 	_ "golang.org/x/tools/cmd/goimports"
 	_ "k8s.io/code-generator"
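The tools.go hunk above keeps the ginkgo CLI pinned through Go's usual blank-import "tools file" pattern, so `go install` resolves the same v2 version as the test code. A sketch of the full pattern, assuming the conventional build tag (only the import line is from this PR):

```go
//go:build tools
// +build tools

// Package tools declares build-time tool dependencies so `go mod tidy`
// keeps their versions pinned alongside the code that uses them.
package tools

import (
	_ "github.com/onsi/ginkgo/v2/ginkgo" // e2e test runner CLI
)
```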
@@ -3,7 +3,7 @@ package e2e
 import (
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"

@@ -3,7 +3,7 @@ package e2e
 import (
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 

@@ -1,7 +1,7 @@
 package e2e
 
 import (
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"

@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	batchv1 "k8s.io/api/batch/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -1,7 +1,7 @@
 package e2e
 
 import (
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/rand"

@@ -3,7 +3,7 @@ package e2e
 import (
 	"context"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/utils/pointer"

@@ -3,7 +3,7 @@ package e2e
 import (
 	"context"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"

@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -31,11 +31,6 @@ import (
 )
 
 const (
-	// TestSuiteSetupTimeOut defines the time after which the suite setup times out.
-	TestSuiteSetupTimeOut = 300 * time.Second
-	// TestSuiteTeardownTimeOut defines the time after which the suite tear down times out.
-	TestSuiteTeardownTimeOut = 300 * time.Second
-
 	// pollInterval defines the interval time for a poll operation.
 	pollInterval = 5 * time.Second
 	// pollTimeout defines the time after which the poll operation times out.
@@ -109,7 +104,7 @@ var _ = ginkgo.BeforeSuite(func() {
 
 	err = setupTestNamespace(testNamespace, kubeClient)
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-}, TestSuiteSetupTimeOut.Seconds())
+})
 
 var _ = ginkgo.AfterSuite(func() {
 	// cleanup clusterLabels set by the E2E test
@@ -122,7 +117,7 @@ var _ = ginkgo.AfterSuite(func() {
 	// It will not return error even if there is no such namespace in there that may happen in case setup failed.
 	err := cleanupTestNamespace(testNamespace, kubeClient)
 	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-}, TestSuiteTeardownTimeOut.Seconds())
+})
 
 // setupTestNamespace will create a namespace in control plane and all member clusters, most of cases will run against it.
 // The reason why we need a separated namespace is it will make it easier to cleanup resources deployed by the testing.
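The dropped TestSuiteSetupTimeOut/TestSuiteTeardownTimeOut constants follow from a ginkgo v2 API change: v1 accepted an optional timeout in seconds as a trailing argument to BeforeSuite and AfterSuite, and v2 removed that parameter. A minimal sketch of the v2 shape (suite name and bodies hypothetical):

```go
package e2e

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "E2E Suite")
}

// v2 setup nodes take only the body; the v1 float64 timeout argument is gone.
var _ = ginkgo.BeforeSuite(func() {
	// build clients, create the shared test namespace, ...
})

var _ = ginkgo.AfterSuite(func() {
	// cleanup
})
```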
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/api/meta"

@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 )
 
@@ -22,8 +22,8 @@ func TestIntegration(t *testing.T) {
 
 var _ = ginkgo.BeforeSuite(func() {
 	// suite set up, such as get karmada environment ready.
-}, TestSuiteSetupTimeOut.Seconds())
+})
 
 var _ = ginkgo.AfterSuite(func() {
 	// suite tear down, such as cleanup karmada environment.
-}, TestSuiteTeardownTimeOut.Seconds())
+})

@@ -1,6 +1,6 @@
 package integration
 
-import "github.com/onsi/ginkgo"
+import "github.com/onsi/ginkgo/v2"
 
 var _ = ginkgo.Describe("Work", func() {
 	ginkgo.BeforeEach(func() {

@@ -0,0 +1,7 @@
# This is the official list of pprof authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Google Inc.

@@ -0,0 +1,16 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
Raul Silvera <rsilvera@google.com>
Tipp Moseley <tipp@google.com>
Hyoun Kyu Cho <netforce@google.com>
Martin Spier <spiermar@gmail.com>
Taco de Wolff <tacodewolff@gmail.com>
Andrew Hunter <andrewhhunter@gmail.com>

@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@@ -0,0 +1,567 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"errors"
	"sort"
)

func (p *Profile) decoder() []decoder {
	return profileDecoder
}

// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			keyX := addString(strings, k)
			vs := s.NumLabel[k]
			units := s.NumUnit[k]
			for i, v := range vs {
				var unitX int64
				if len(units) != 0 {
					unitX = addString(strings, units[i])
				}
				s.labelX = append(s.labelX,
					label{
						keyX:  keyX,
						numX:  v,
						unitX: unitX,
					},
				)
			}
		}
		s.locationIDX = make([]uint64, len(s.Location))
		for i, loc := range s.Location {
			s.locationIDX[i] = loc.ID
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	p.commentX = nil
	for _, c := range p.Comments {
		p.commentX = append(p.commentX, addString(strings, c))
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)

	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}

func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
}

var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		err := decodeMessage(b, x)
		var tmp []Line
		x.Line = append(tmp, x.Line...) // Shrink to allocated size
		return err
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		err := decodeStrings(b, &m.(*Profile).stringTable)
		if err != nil {
			return err
		}
		if m.(*Profile).stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// int64 time_nanos = 9
	func(b *buffer, m message) error {
		if m.(*Profile).TimeNanos != 0 {
			return errConcatProfile
		}
		return decodeInt64(b, &m.(*Profile).TimeNanos)
	},
	// int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// ValueType period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
	// repeated int64 comment = 13
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}

// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
func (p *Profile) postDecode() error {
	var err error
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	mappingIds := make([]*Mapping, len(p.Mapping)+1)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		if m.ID < uint64(len(mappingIds)) {
			mappingIds[m.ID] = m
		} else {
			mappings[m.ID] = m
		}
	}

	functions := make(map[uint64]*Function, len(p.Function))
	functionIds := make([]*Function, len(p.Function)+1)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		if f.ID < uint64(len(functionIds)) {
			functionIds[f.ID] = f
		} else {
			functions[f.ID] = f
		}
	}

	locations := make(map[uint64]*Location, len(p.Location))
	locationIds := make([]*Location, len(p.Location)+1)
	for _, l := range p.Location {
		if id := l.mappingIDX; id < uint64(len(mappingIds)) {
			l.Mapping = mappingIds[id]
		} else {
			l.Mapping = mappings[id]
		}
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].functionIDX = 0
				if id < uint64(len(functionIds)) {
					l.Line[i].Function = functionIds[id]
				} else {
					l.Line[i].Function = functions[id]
				}
			}
		}
		if l.ID < uint64(len(locationIds)) {
			locationIds[l.ID] = l
		} else {
			locations[l.ID] = l
		}
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	for _, s := range p.Sample {
		labels := make(map[string][]string, len(s.labelX))
		numLabels := make(map[string][]int64, len(s.labelX))
		numUnits := make(map[string][]string, len(s.labelX))
		for _, l := range s.labelX {
			var key, value string
			key, err = getString(p.stringTable, &l.keyX, err)
			if l.strX != 0 {
				value, err = getString(p.stringTable, &l.strX, err)
				labels[key] = append(labels[key], value)
			} else if l.numX != 0 || l.unitX != 0 {
				numValues := numLabels[key]
				units := numUnits[key]
				if l.unitX != 0 {
					var unit string
					unit, err = getString(p.stringTable, &l.unitX, err)
					units = padStringArray(units, len(numValues))
					numUnits[key] = append(units, unit)
				}
				numLabels[key] = append(numLabels[key], l.numX)
			}
		}
		if len(labels) > 0 {
			s.Label = labels
		}
		if len(numLabels) > 0 {
			s.NumLabel = numLabels
			for key, units := range numUnits {
				if len(units) > 0 {
					numUnits[key] = padStringArray(units, len(numLabels[key]))
				}
			}
			s.NumUnit = numUnits
		}
		s.Location = make([]*Location, len(s.locationIDX))
		for i, lid := range s.locationIDX {
			if lid < uint64(len(locationIds)) {
				s.Location[i] = locationIds[lid]
			} else {
				s.Location[i] = locations[lid]
			}
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	if pt := p.PeriodType; pt == nil {
		p.PeriodType = &ValueType{}
	}

	if pt := p.PeriodType; pt != nil {
		pt.Type, err = getString(p.stringTable, &pt.typeX, err)
		pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
	}

	for _, i := range p.commentX {
		var c string
		c, err = getString(p.stringTable, &i, err)
		p.Comments = append(p.Comments, c)
	}

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.stringTable = nil
	return err
}

// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is less than l.
func padStringArray(arr []string, l int) []string {
	if l <= len(arr) {
		return arr
	}
	return append(arr, make([]string, l-len(arr))...)
}

func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}

func (p *Sample) decoder() []decoder {
	return sampleDecoder
}

func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	encodeInt64s(b, 2, p.Value)
	for _, x := range p.labelX {
		encodeMessage(b, 3, x)
	}
}

var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		n := len(s.labelX)
		s.labelX = append(s.labelX, label{})
		return decodeMessage(b, &s.labelX[n])
	},
}

func (p label) decoder() []decoder {
	return labelDecoder
}

func (p label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
	encodeInt64Opt(b, 4, p.unitX)
}

var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
	// optional int64 num = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
}

func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_offset = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}

func (p *Location) decoder() []decoder {
	return locationDecoder
}

func (p *Location) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.mappingIDX)
	encodeUint64Opt(b, 3, p.Address)
	for i := range p.Line {
		encodeMessage(b, 4, &p.Line[i])
	}
	encodeBoolOpt(b, 5, p.IsFolded)
}

var locationDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
	func(b *buffer, m message) error { // repeated Line line = 4
		pp := m.(*Location)
		n := len(pp.Line)
		pp.Line = append(pp.Line, Line{})
		return decodeMessage(b, &pp.Line[n])
	},
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
}

func (p *Line) decoder() []decoder {
	return lineDecoder
}

func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}

var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}

func (p *Function) decoder() []decoder {
	return functionDecoder
}

func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}

var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
	// optional int64 function_system_name = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
	// repeated int64 filename = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
	// optional int64 start_line = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}

func addString(strings map[string]int, s string) int64 {
	i, ok := strings[s]
	if !ok {
		i = len(strings)
		strings[s] = i
	}
	return int64(i)
}

func getString(strings []string, strng *int64, err error) (string, error) {
	if err != nil {
		return "", err
	}
	s := int(*strng)
	if s < 0 || s >= len(strings) {
		return "", errMalformed
	}
	*strng = 0
	return strings[s], nil
}
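The encoder above interns every string into Profile.stringTable and serializes only int64 indexes; addString assigns indexes on first use and getString resolves them on decode (index 0 is reserved for the empty string, as the string_table decoder enforces). A standalone sketch of that interning pattern, independent of the pprof types:

```go
package main

import "fmt"

// intern mirrors addString: return the table index for s, assigning
// the next free index on first use.
func intern(table map[string]int, s string) int {
	i, ok := table[s]
	if !ok {
		i = len(table)
		table[s] = i
	}
	return i
}

func main() {
	table := map[string]int{"": 0} // index 0 must stay the empty string
	fmt.Println(intern(table, "cpu"))         // 1
	fmt.Println(intern(table, "nanoseconds")) // 2
	fmt.Println(intern(table, "cpu"))         // 1 again: duplicates share an index
}
```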
|
@ -0,0 +1,270 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package profile
|
||||
|
||||
// Implements methods to filter samples from profiles.
|
||||
|
||||
import "regexp"
|
||||
|
||||
// FilterSamplesByName filters the samples in a profile and only keeps
|
||||
// samples where at least one frame matches focus but none match ignore.
|
||||
// Returns true is the corresponding regexp matched at least one sample.
|
||||
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
|
||||
focusOrIgnore := make(map[uint64]bool)
|
||||
hidden := make(map[uint64]bool)
|
||||
for _, l := range p.Location {
|
||||
if ignore != nil && l.matchesName(ignore) {
|
||||
im = true
|
||||
focusOrIgnore[l.ID] = false
|
||||
} else if focus == nil || l.matchesName(focus) {
|
||||
fm = true
|
||||
focusOrIgnore[l.ID] = true
|
||||
}
|
||||
|
||||
if hide != nil && l.matchesName(hide) {
|
||||
hm = true
|
||||
l.Line = l.unmatchedLines(hide)
|
||||
if len(l.Line) == 0 {
|
||||
hidden[l.ID] = true
|
||||
}
|
||||
}
|
||||
if show != nil {
|
||||
l.Line = l.matchedLines(show)
|
||||
if len(l.Line) == 0 {
|
||||
hidden[l.ID] = true
|
||||
} else {
|
||||
hnm = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s := make([]*Sample, 0, len(p.Sample))
|
||||
for _, sample := range p.Sample {
|
||||
if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
|
||||
if len(hidden) > 0 {
|
||||
var locs []*Location
|
||||
for _, loc := range sample.Location {
|
||||
if !hidden[loc.ID] {
|
||||
locs = append(locs, loc)
|
||||
}
|
||||
}
|
||||
if len(locs) == 0 {
|
||||
// Remove sample with no locations (by not adding it to s).
|
||||
continue
|
||||
}
|
||||
sample.Location = locs
|
||||
}
|
||||
s = append(s, sample)
|
||||
}
|
||||
}
|
||||
p.Sample = s
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ShowFrom drops all stack frames above the highest matching frame and returns
|
||||
// whether a match was found. If showFrom is nil it returns false and does not
|
||||
// modify the profile.
|
||||
//
|
||||
// Example: consider a sample with frames [A, B, C, B], where A is the root.
|
||||
// ShowFrom(nil) returns false and has frames [A, B, C, B].
|
||||
// ShowFrom(A) returns true and has frames [A, B, C, B].
|
||||
// ShowFrom(B) returns true and has frames [B, C, B].
|
||||
// ShowFrom(C) returns true and has frames [C, B].
|
||||
// ShowFrom(D) returns false and drops the sample because no frames remain.
|
||||
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
|
||||
if showFrom == nil {
|
||||
return false
|
||||
}
|
||||
// showFromLocs stores location IDs that matched ShowFrom.
|
||||
showFromLocs := make(map[uint64]bool)
|
||||
// Apply to locations.
|
||||
for _, loc := range p.Location {
|
||||
if filterShowFromLocation(loc, showFrom) {
|
||||
showFromLocs[loc.ID] = true
|
||||
matched = true
|
||||
}
|
||||
}
|
||||
// For all samples, strip locations after the highest matching one.
|
||||
s := make([]*Sample, 0, len(p.Sample))
|
||||
for _, sample := range p.Sample {
|
||||
for i := len(sample.Location) - 1; i >= 0; i-- {
|
||||
if showFromLocs[sample.Location[i].ID] {
|
||||
sample.Location = sample.Location[:i+1]
|
||||
s = append(s, sample)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
p.Sample = s
|
||||
return matched
|
||||
}
|
||||
|
||||
// filterShowFromLocation tests a showFrom regex against a location, removes
|
||||
// lines after the last match and returns whether a match was found. If the
|
||||
// mapping is matched, then all lines are kept.
|
||||
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
|
||||
if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
|
||||
return true
|
||||
}
|
||||
if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
|
||||
loc.Line = loc.Line[:i+1]
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lastMatchedLineIndex returns the index of the last line that matches a regex,
|
||||
// or -1 if no match is found.
|
||||
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
|
||||
for i := len(loc.Line) - 1; i >= 0; i-- {
|
||||
if fn := loc.Line[i].Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// FilterTagsByName filters the tags in a profile and only keeps
|
||||
// tags that match show and not hide.
|
||||
func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
|
||||
matchRemove := func(name string) bool {
|
||||
matchShow := show == nil || show.MatchString(name)
|
||||
matchHide := hide != nil && hide.MatchString(name)
|
||||
|
||||
if matchShow {
|
||||
sm = true
|
||||
}
|
||||
if matchHide {
|
||||
hm = true
|
||||
}
|
||||
return !matchShow || matchHide
|
||||
}
|
||||
for _, s := range p.Sample {
|
||||
for lab := range s.Label {
|
||||
if matchRemove(lab) {
|
||||
delete(s.Label, lab)
|
||||
}
|
||||
}
|
||||
for lab := range s.NumLabel {
|
||||
if matchRemove(lab) {
|
||||
delete(s.NumLabel, lab)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// matchesName returns whether the location matches the regular
|
||||
// expression. It checks any available function names, file names, and
|
||||
// mapping object filename.
|
||||
func (loc *Location) matchesName(re *regexp.Regexp) bool {
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// unmatchedLines returns the lines in the location that do not match
|
||||
// the regular expression.
|
||||
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return nil
|
||||
}
|
||||
var lines []Line
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
lines = append(lines, ln)
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
// matchedLines returns the lines in the location that match
|
||||
// the regular expression.
|
||||
func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return loc.Line
|
||||
}
|
||||
var lines []Line
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
lines = append(lines, ln)
|
||||
}
|
||||
return lines
|
||||
}

// focusedAndNotIgnored looks up a slice of ids against a map of
// focused/ignored locations. The map only contains locations that are
// explicitly focused or ignored. Returns whether there is at least
// one focused location but no ignored locations.
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
	var f bool
	for _, loc := range locs {
		if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
			if focus {
				// Found focused location. Must keep searching in case there
				// is an ignored one as well.
				f = true
			} else {
				// Found ignored location. Can return false right away.
				return false
			}
		}
	}
	return f
}

// TagMatch selects tags for filtering
type TagMatch func(s *Sample) bool

// FilterSamplesByTag removes all samples from the profile, except
// those that match focus and do not match the ignore regular
// expression.
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
	samples := make([]*Sample, 0, len(p.Sample))
	for _, s := range p.Sample {
		focused, ignored := true, false
		if focus != nil {
			focused = focus(s)
		}
		if ignore != nil {
			ignored = ignore(s)
		}
		fm = fm || focused
		im = im || ignored
		if focused && !ignored {
			samples = append(samples, s)
		}
	}
	p.Sample = samples
	return
}
@@ -0,0 +1,64 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"fmt"
	"strconv"
	"strings"
)

// SampleIndexByName returns the appropriate index for a value of sample index.
// If numeric, it returns the number, otherwise it looks up the text in the
// profile sample types.
func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
	if sampleIndex == "" {
		if dst := p.DefaultSampleType; dst != "" {
			for i, t := range sampleTypes(p) {
				if t == dst {
					return i, nil
				}
			}
		}
		// By default select the last sample value
		return len(p.SampleType) - 1, nil
	}
	if i, err := strconv.Atoi(sampleIndex); err == nil {
		if i < 0 || i >= len(p.SampleType) {
			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
		}
		return i, nil
	}

	// Remove the inuse_ prefix to support legacy pprof options
	// "inuse_space" and "inuse_objects" for profiles containing types
	// "space" and "objects".
	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
	for i, t := range p.SampleType {
		if t.Type == sampleIndex || t.Type == noInuse {
			return i, nil
		}
	}

	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
}
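
// Illustrative example (not part of the original source): for a heap profile
// whose sample types are [inuse_objects inuse_space], SampleIndexByName("")
// returns 1 (the last type, absent a DefaultSampleType), "0" returns 0, and
// "inuse_space" returns 1, matching either "inuse_space" or legacy "space".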

func sampleTypes(p *Profile) []string {
	types := make([]string, len(p.SampleType))
	for i, t := range p.SampleType {
		types[i] = t.Type
	}
	return types
}
@@ -0,0 +1,315 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file implements parsers to convert java legacy profiles into
// the profile.proto format.

package profile

import (
	"bytes"
	"fmt"
	"io"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
)

var (
	attributeRx            = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
	javaSampleRx           = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
	javaLocationRx         = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
	javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
	javaLocationPathRx     = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
)
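
// For reference (comment not in the original source): javaSampleRx matches
// sample lines such as "5 120000 @ 0xa1 0xb2", i.e. two decimal values
// followed by "@" and a list of hex addresses.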

// javaCPUProfile returns a new Profile from profilez data.
// b is the profile bytes after the header, period is the profiling
// period, and parse is a function to parse 8-byte chunks from the
// profile in its native endianness.
func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
	p := &Profile{
		Period:     period * 1000,
		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
		SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
	}
	var err error
	var locs map[uint64]*Location
	if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
		return nil, err
	}

	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}

// parseJavaProfile returns a new profile from heapz or contentionz
// data. b is the profile bytes after the header.
func parseJavaProfile(b []byte) (*Profile, error) {
	h := bytes.SplitAfterN(b, []byte("\n"), 2)
	if len(h) < 2 {
		return nil, errUnrecognized
	}

	p := &Profile{
		PeriodType: &ValueType{},
	}
	header := string(bytes.TrimSpace(h[0]))

	var err error
	var pType string
	switch header {
	case "--- heapz 1 ---":
		pType = "heap"
	case "--- contentionz 1 ---":
		pType = "contention"
	default:
		return nil, errUnrecognized
	}

	if b, err = parseJavaHeader(pType, h[1], p); err != nil {
		return nil, err
	}
	var locs map[uint64]*Location
	if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
		return nil, err
	}
	if err = parseJavaLocations(b, locs, p); err != nil {
		return nil, err
	}

	// Strip out addresses for better merge.
	if err = p.Aggregate(true, true, true, true, false); err != nil {
		return nil, err
	}

	return p, nil
}

// parseJavaHeader parses the attribute section on a java profile and
// populates a profile. Returns the remainder of the buffer after all
// attributes.
func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			h := attributeRx.FindStringSubmatch(line)
			if h == nil {
				// Not a valid attribute, exit.
				return b, nil
			}

			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
			var err error
			switch pType + "/" + attribute {
			case "heap/format", "cpu/format", "contention/format":
				if value != "java" {
					return nil, errUnrecognized
				}
			case "heap/resolution":
				p.SampleType = []*ValueType{
					{Type: "inuse_objects", Unit: "count"},
					{Type: "inuse_space", Unit: value},
				}
			case "contention/resolution":
				p.SampleType = []*ValueType{
					{Type: "contentions", Unit: "count"},
					{Type: "delay", Unit: value},
				}
			case "contention/sampling period":
				p.PeriodType = &ValueType{
					Type: "contentions", Unit: "count",
				}
				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
			case "contention/ms since reset":
				millis, err := strconv.ParseInt(value, 0, 64)
				if err != nil {
					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
				}
				p.DurationNanos = millis * 1000 * 1000
			default:
				return nil, errUnrecognized
			}
		}
		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, nil
}

// parseJavaSamples parses the samples from a java profile and
// populates the Samples in a profile. Returns the remainder of the
// buffer after the samples.
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	locs := make(map[uint64]*Location)
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			sample := javaSampleRx.FindStringSubmatch(line)
			if sample == nil {
				// Not a valid sample, exit.
				return b, locs, nil
			}

			// Java profiles have data/fields inverted compared to other
			// profile types.
			var err error
			value1, value2, value3 := sample[2], sample[1], sample[3]
			addrs, err := parseHexAddresses(value3)
			if err != nil {
				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
			}

			var sloc []*Location
			for _, addr := range addrs {
				loc := locs[addr]
				if locs[addr] == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			s := &Sample{
				Value:    make([]int64, 2),
				Location: sloc,
			}

			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}
			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}

			switch pType {
			case "heap":
				const javaHeapzSamplingRate = 524288 // 512K
				if s.Value[0] == 0 {
					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
				}
				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
			case "contention":
				if period := p.Period; period != 0 {
					s.Value[0] = s.Value[0] * p.Period
					s.Value[1] = s.Value[1] * p.Period
				}
			}
			p.Sample = append(p.Sample, s)
		}
		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, locs, nil
}

// parseJavaLocations parses the location information in a java
// profile and populates the Locations in a profile. It uses the
// location addresses from the profile as the ID of each location.
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
	r := bytes.NewBuffer(b)
	fns := make(map[string]*Function)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return err
			}
			if line == "" {
				break
			}
		}

		if line = strings.TrimSpace(line); line == "" {
			continue
		}

		jloc := javaLocationRx.FindStringSubmatch(line)
		if len(jloc) != 3 {
			continue
		}
		addr, err := strconv.ParseUint(jloc[1], 16, 64)
		if err != nil {
			return fmt.Errorf("parsing sample %s: %v", line, err)
		}
		loc := locs[addr]
		if loc == nil {
			// Unused/unseen
			continue
		}
		var lineFunc, lineFile string
		var lineNo int64

		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
			// Found a line of the form: "function (file:line)"
			lineFunc, lineFile = fileLine[1], fileLine[2]
			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
				lineNo = n
			}
		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
			// If there's not a file:line, it's a shared library path.
			// The path isn't interesting, so just give the .so.
			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
		} else if strings.Contains(jloc[2], "generated stub/JIT") {
			lineFunc = "STUB"
		} else {
			// Treat whole line as the function name. This is used by the
			// java agent for internal states such as "GC" or "VM".
			lineFunc = jloc[2]
		}
		fn := fns[lineFunc]

		if fn == nil {
			fn = &Function{
				Name:       lineFunc,
				SystemName: lineFunc,
				Filename:   lineFile,
			}
			fns[lineFunc] = fn
			p.Function = append(p.Function, fn)
		}
		loc.Line = []Line{
			{
				Function: fn,
				Line:     lineNo,
			},
		}
		loc.Address = 0
	}

	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()

	return nil
}
File diff suppressed because it is too large

@@ -0,0 +1,481 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
	p, _ = Merge([]*Profile{p})
	return p
}

// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}

// Normalize normalizes the source profile by multiplying each value in the
// profile by the ratio of the sum of the base profile's values of that sample
// type to the sum of the source profile's values of that sample type.
func (p *Profile) Normalize(pb *Profile) error {

	if err := p.compatible(pb); err != nil {
		return err
	}

	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}
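
// Illustrative note (not in the original source): if the base profile sums
// to 100 for a sample type and the source profile sums to 400, Normalize
// scales every source value of that type by 0.25.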

func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID map[uint64]*Location
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

type mapInfo struct {
	m      *Mapping
	offset int64
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping. Add current values to the
	// existing sample.
	k := s.key()
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
	ids := make([]string, len(sample.Location))
	for i, l := range sample.Location {
		ids[i] = strconv.FormatUint(l.ID, 16)
	}

	labels := make([]string, 0, len(sample.Label))
	for k, v := range sample.Label {
		labels = append(labels, fmt.Sprintf("%q%q", k, v))
	}
	sort.Strings(labels)

	numlabels := make([]string, 0, len(sample.NumLabel))
	for k, v := range sample.NumLabel {
		numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
	}
	sort.Strings(numlabels)

	return sampleKey{
		strings.Join(ids, "|"),
		strings.Join(labels, ""),
		strings.Join(numlabels, ""),
	}
}

type sampleKey struct {
	locations string
	labels    string
	numlabels string
}

func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l, ok := pm.locationsByID[src.ID]; ok {
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:       uint64(len(pm.p.Location) + 1),
		Mapping:  mi.m,
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID[src.ID] = ll
		return ll
	}
	pm.locationsByID[src.ID] = l
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}

// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
	key := locationKey{
		addr:     l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}

type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}

func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID:              uint64(len(pm.p.Mapping) + 1),
		Start:           src.Start,
		Limit:           src.Limit,
		Offset:          src.Offset,
		File:            src.File,
		BuildID:         src.BuildID,
		HasFunctions:    src.HasFunctions,
		HasFilenames:    src.HasFilenames,
		HasLineNumbers:  src.HasLineNumbers,
		HasInlineFrames: src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}

// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000

	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)
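	// e.g. a mapping of 0x2fff bytes rounds up to a 0x3000-byte key.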
	key := mappingKey{
		size:   size,
		offset: m.Offset,
	}

	switch {
	case m.BuildID != "":
		key.buildIDOrFile = m.BuildID
	case m.File != "":
		key.buildIDOrFile = m.File
	default:
		// A mapping containing neither build ID nor file name is a fake mapping. A
		// key with empty buildIDOrFile is used for fake mappings so that they are
		// treated as the same mapping during merging.
	}
	return key
}

type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}

func (pm *profileMerger) mapLine(src Line) Line {
	ln := Line{
		Function: pm.mapFunction(src.Function),
		Line:     src.Line,
	}
	return ln
}

func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}

// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
	return functionKey{
		f.StartLine,
		f.Name,
		f.SystemName,
		f.Filename,
	}
}

type functionKey struct {
	startLine                  int64
	name, systemName, fileName string
}

// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}

// compatible determines if two profiles can be compared/merged.
// It returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}

	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}
	return nil
}

// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
}
@@ -0,0 +1,805 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
package profile

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"
)

// Profile is an in-memory representation of profile.proto.
type Profile struct {
	SampleType        []*ValueType
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	commentX           []int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64
	Label    map[string][]string
	NumLabel map[string][]int64
	NumUnit  map[string][]string

	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label
type label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
	// can be set if numX has value
	unitX int64
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	fileX    int64
	buildIDX int64
}

// Location corresponds to Profile.Location
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64

	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	nameX       int64
	systemNameX int64
	filenameX   int64
}

// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return ParseData(data)
}

// ParseData parses a profile from a buffer and checks for its
// validity.
func ParseData(data []byte) (*Profile, error) {
	var p *Profile
	var err error
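	// A gzip stream begins with the two magic bytes 0x1f 0x8b.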
	if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
		gz, err := gzip.NewReader(bytes.NewBuffer(data))
		if err == nil {
			data, err = ioutil.ReadAll(gz)
		}
		if err != nil {
			return nil, fmt.Errorf("decompressing profile: %v", err)
		}
	}
	if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
		p, err = parseLegacy(data)
	}

	if err != nil {
		return nil, fmt.Errorf("parsing profile: %v", err)
	}

	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("malformed profile: %v", err)
	}
	return p, nil
}

var errUnrecognized = fmt.Errorf("unrecognized profile format")
var errMalformed = fmt.Errorf("malformed profile format")
var errNoData = fmt.Errorf("empty input file")
var errConcatProfile = fmt.Errorf("concatenated profiles detected")

func parseLegacy(data []byte) (*Profile, error) {
	parsers := []func([]byte) (*Profile, error){
		parseCPU,
		parseHeap,
		parseGoCount, // goroutine, threadcreate
		parseThread,
		parseContention,
		parseJavaProfile,
	}

	for _, parser := range parsers {
		p, err := parser(data)
		if err == nil {
			p.addLegacyFrameInfo()
			return p, nil
		}
		if err != errUnrecognized {
			return nil, err
		}
	}
	return nil, errUnrecognized
}

// ParseUncompressed parses an uncompressed protobuf into a profile.
func ParseUncompressed(data []byte) (*Profile, error) {
	if len(data) == 0 {
		return nil, errNoData
	}
	p := &Profile{}
	if err := unmarshal(data, p); err != nil {
		return nil, err
	}

	if err := p.postDecode(); err != nil {
		return nil, err
	}

	return p, nil
}

var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
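
// libRx matches shared-library file names such as "libc.so" or "libc.so.6";
// massageMappings uses it below to skip libraries when guessing the main binary.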

// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
func (p *Profile) massageMappings() {
	// Merge adjacent regions with matching names, checking that the offsets match
	if len(p.Mapping) > 1 {
		mappings := []*Mapping{p.Mapping[0]}
		for _, m := range p.Mapping[1:] {
			lm := mappings[len(mappings)-1]
			if adjacent(lm, m) {
				lm.Limit = m.Limit
				if m.File != "" {
					lm.File = m.File
				}
				if m.BuildID != "" {
					lm.BuildID = m.BuildID
				}
				p.updateLocationMapping(m, lm)
				continue
			}
			mappings = append(mappings, m)
		}
		p.Mapping = mappings
	}

	// Use heuristics to identify main binary and move it to the top of the list of mappings
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		if len(file) == 0 {
			continue
		}
		if len(libRx.FindStringSubmatch(file)) > 0 {
			continue
		}
		if file[0] == '[' {
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}

	// Keep the mapping IDs neatly sorted
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}

// adjacent returns whether two mapping entries represent the same
// mapping that has been split into two. It checks that their address
// ranges are adjacent and that their offsets match, where offsets are
// available.
func adjacent(m1, m2 *Mapping) bool {
	if m1.File != "" && m2.File != "" {
		if m1.File != m2.File {
			return false
		}
	}
	if m1.BuildID != "" && m2.BuildID != "" {
		if m1.BuildID != m2.BuildID {
			return false
		}
	}
	if m1.Limit != m2.Start {
		return false
	}
	if m1.Offset != 0 && m2.Offset != 0 {
		offset := m1.Offset + (m1.Limit - m1.Start)
		if offset != m2.Offset {
			return false
		}
	}
	return true
}

func (p *Profile) updateLocationMapping(from, to *Mapping) {
	for _, l := range p.Location {
		if l.Mapping == from {
			l.Mapping = to
		}
	}
}

func serialize(p *Profile) []byte {
	p.encodeMu.Lock()
	p.preEncode()
	b := marshal(p)
	p.encodeMu.Unlock()
	return b
}

// Write writes the profile as a gzip-compressed marshaled protobuf.
func (p *Profile) Write(w io.Writer) error {
	zw := gzip.NewWriter(w)
	defer zw.Close()
	_, err := zw.Write(serialize(p))
	return err
}

// WriteUncompressed writes the profile as a marshaled protobuf.
func (p *Profile) WriteUncompressed(w io.Writer) error {
	_, err := w.Write(serialize(p))
	return err
}

// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
//   - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if s == nil {
			return fmt.Errorf("profile has nil sample")
		}
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
		for _, l := range s.Location {
			if l == nil {
				return fmt.Errorf("sample has nil location")
			}
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m == nil {
			return fmt.Errorf("profile has nil mapping")
		}
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f == nil {
			return fmt.Errorf("profile has nil function")
		}
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l == nil {
			return fmt.Errorf("profile has nil location")
		}
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		if m := l.Mapping; m != nil {
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		for _, ln := range l.Line {
			f := ln.Function
			if f == nil {
				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
			}
			if f.ID == 0 || functions[f.ID] != f {
				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
			}
		}
	}
	return nil
}

// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			if !inlineFrame && len(l.Line) > 1 {
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	return p.CheckValid()
}
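
// Illustrative note (not in the original source): p.Aggregate(true, true,
// false, false, false) keeps inline frames and function names but clears
// file names, line numbers and addresses, so locations that differ only in
// those attributes become equivalent.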

// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// The unit for a given key is the first unit encountered for that key. If
// multiple units are encountered for values paired with a particular key,
// then the first unit encountered is used and all other units are returned
// in sorted order in the map of ignored units.
// If no units are encountered for a particular key, the unit is then
// inferred based on the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
	numLabelUnits := map[string]string{}
	ignoredUnits := map[string]map[string]bool{}
	encounteredKeys := map[string]bool{}

	// Determine units based on numeric tags for each sample.
	for _, s := range p.Sample {
		for k := range s.NumLabel {
			encounteredKeys[k] = true
			for _, unit := range s.NumUnit[k] {
				if unit == "" {
					continue
				}
				if wantUnit, ok := numLabelUnits[k]; !ok {
					numLabelUnits[k] = unit
				} else if wantUnit != unit {
					if v, ok := ignoredUnits[k]; ok {
						v[unit] = true
					} else {
						ignoredUnits[k] = map[string]bool{unit: true}
					}
				}
			}
		}
	}
	// Infer units for keys without any units associated with
	// numeric tag values.
	for key := range encounteredKeys {
		unit := numLabelUnits[key]
		if unit == "" {
			switch key {
			case "alignment", "request":
				numLabelUnits[key] = "bytes"
			default:
				numLabelUnits[key] = key
			}
		}
	}

	// Copy ignored units into more readable format
	unitsIgnored := make(map[string][]string, len(ignoredUnits))
	for key, values := range ignoredUnits {
		units := make([]string, len(values))
		i := 0
		for unit := range values {
			units[i] = unit
			i++
		}
		sort.Strings(units)
		unitsIgnored[key] = units
	}

	return numLabelUnits, unitsIgnored
}

// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
	}

	ss = append(ss, "Samples:")
	var sh1 string
	for _, s := range p.SampleType {
		dflt := ""
		if s.Type == p.DefaultSampleType {
			dflt = "[dflt]"
		}
		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		ss = append(ss, s.string())
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		ss = append(ss, l.string())
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		ss = append(ss, m.string())
	}

	return strings.Join(ss, "\n") + "\n"
}

// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
	bits := ""
	if m.HasFunctions {
		bits = bits + "[FN]"
	}
	if m.HasFilenames {
		bits = bits + "[FL]"
	}
	if m.HasLineNumbers {
		bits = bits + "[LN]"
	}
	if m.HasInlineFrames {
		bits = bits + "[IN]"
	}
	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
		m.ID,
		m.Start, m.Limit, m.Offset,
		m.File,
		m.BuildID,
		bits)
}

// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
	ss := []string{}
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	if len(l.Line) == 0 {
		ss = append(ss, locStr)
	}
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		locStr = " "
	}
	return strings.Join(ss, "\n")
}

// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	var sv string
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	const labelHeader = " "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}

// labelsToString returns a string representation of a
// map representing labels.
func labelsToString(labels map[string][]string) string {
	ls := []string{}
	for k, v := range labels {
		ls = append(ls, fmt.Sprintf("%s:%v", k, v))
	}
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// numLabelsToString returns a string representation of a map
// representing numeric labels.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
	ls := []string{}
	for k, v := range numLabels {
		units := numUnits[k]
		var labelString string
		if len(units) == len(v) {
			values := make([]string, len(v))
			for i, vv := range v {
				values[i] = fmt.Sprintf("%d %s", vv, units[i])
			}
			labelString = fmt.Sprintf("%s:%v", k, values)
		} else {
			labelString = fmt.Sprintf("%s:%v", k, v)
		}
		ls = append(ls, labelString)
	}
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// SetLabel sets the specified key to the specified value for all samples in the
// profile.
func (p *Profile) SetLabel(key string, value []string) {
	for _, sample := range p.Sample {
		if sample.Label == nil {
			sample.Label = map[string][]string{key: value}
		} else {
			sample.Label[key] = value
		}
	}
}

// RemoveLabel removes all labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveLabel(key string) {
	for _, sample := range p.Sample {
		delete(sample.Label, key)
	}
}

// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
	for _, v := range s.Label[key] {
		if v == value {
			return true
		}
	}
	return false
}

// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
	return s.HasLabel("pprof::base", "true")
}

// Scale multiplies all sample values in a profile by a constant and keeps
// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
	if ratio == 1 {
		return
	}
	ratios := make([]float64, len(p.SampleType))
	for i := range p.SampleType {
		ratios[i] = ratio
	}
	p.ScaleN(ratios)
}

// ScaleN multiplies each sample value in a sample by a different amount
// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
	if len(p.SampleType) != len(ratios) {
		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
	}
	allOnes := true
	for _, r := range ratios {
		if r != 1 {
			allOnes = false
			break
		}
	}
	if allOnes {
		return nil
	}
	fillIdx := 0
	for _, s := range p.Sample {
		keepSample := false
		for i, v := range s.Value {
			if ratios[i] != 1 {
				val := int64(math.Round(float64(v) * ratios[i]))
				s.Value[i] = val
				keepSample = keepSample || val != 0
			}
		}
		if keepSample {
			p.Sample[fillIdx] = s
			fillIdx++
		}
	}
	p.Sample = p.Sample[:fillIdx]
	return nil
}

// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && !l.Mapping.HasFunctions {
			return false
		}
	}
	return true
}

// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
	for _, l := range p.Location {
		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
			return false
		}
	}
	return true
}

// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", "[vsyscall]" and some others; see the code.
func (m *Mapping) Unsymbolizable() bool {
	name := filepath.Base(m.File)
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}

// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
	pp := &Profile{}
	if err := unmarshal(serialize(p), pp); err != nil {
		panic(err)
	}
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}
@@ -0,0 +1,370 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is a simple protocol buffer encoder and decoder.
// The format is described at
// https://developers.google.com/protocol-buffers/docs/encoding
//
// A protocol message must implement the message interface:
//   decoder() []decoder
//   encode(*buffer)
//
// The decode method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.

package profile

import (
	"errors"
	"fmt"
)

type buffer struct {
	field int // field tag
	typ   int // proto wire type code for field
	u64   uint64
	data  []byte
	tmp   [16]byte
}

type decoder func(*buffer, message) error

type message interface {
	decoder() []decoder
	encode(*buffer)
}

func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}

func encodeVarint(b *buffer, x uint64) {
	for x >= 128 {
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
	b.data = append(b.data, byte(x))
}
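
// For example, encodeVarint emits 300 (0x12c) as the two bytes 0xac 0x02:
// seven low-order bits per byte, least significant group first, with the
// high bit set on all but the final byte.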

func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3)
	encodeVarint(b, x)
}

func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
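		// The tag and length were appended after the packed payload; the three
		// copies below rotate them through tmp so the final order on the wire
		// is tag, length, payload.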
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}

func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

func encodeInt64s(b *buffer, tag int, x []int64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, uint64(u))
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeInt64(b, tag, u)
	}
}

func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}

func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x {
		encodeBool(b, tag, x)
	}
}

func encodeMessage(b *buffer, tag int, m message) {
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}

func unmarshal(data []byte, m message) (err error) {
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}

func le64(p []byte) uint64 {
	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
}

func le32(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}

func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; ; i++ {
		if i >= 10 || i >= len(data) {
			return 0, nil, errors.New("bad varint")
		}
		u |= uint64(data[i]&0x7F) << uint(7*i)
		if data[i]&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
}
|
||||
|
||||
func decodeField(b *buffer, data []byte) ([]byte, error) {
|
||||
x, data, err := decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.field = int(x >> 3)
|
||||
b.typ = int(x & 7)
|
||||
b.data = nil
|
||||
b.u64 = 0
|
||||
switch b.typ {
|
||||
case 0:
|
||||
b.u64, data, err = decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case 1:
|
||||
if len(data) < 8 {
|
||||
return nil, errors.New("not enough data")
|
||||
}
|
||||
b.u64 = le64(data[:8])
|
||||
data = data[8:]
|
||||
case 2:
|
||||
var n uint64
|
||||
n, data, err = decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n > uint64(len(data)) {
|
||||
return nil, errors.New("too much data")
|
||||
}
|
||||
b.data = data[:n]
|
||||
data = data[n:]
|
||||
case 5:
|
||||
if len(data) < 4 {
|
||||
return nil, errors.New("not enough data")
|
||||
}
|
||||
b.u64 = uint64(le32(data[:4]))
|
||||
data = data[4:]
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown wire type: %d", b.typ)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func checkType(b *buffer, typ int) error {
|
||||
if b.typ != typ {
|
||||
return errors.New("type mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeMessage(b *buffer, m message) error {
|
||||
if err := checkType(b, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
dec := m.decoder()
|
||||
data := b.data
|
||||
for len(data) > 0 {
|
||||
// pull varint field# + type
|
||||
var err error
|
||||
data, err = decodeField(b, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b.field >= len(dec) || dec[b.field] == nil {
|
||||
continue
|
||||
}
|
||||
if err := dec[b.field](b, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt64(b *buffer, x *int64) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = int64(b.u64)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt64s(b *buffer, x *[]int64) error {
|
||||
if b.typ == 2 {
|
||||
// Packed encoding
|
||||
data := b.data
|
||||
tmp := make([]int64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, int64(u))
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var i int64
|
||||
if err := decodeInt64(b, &i); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, i)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUint64(b *buffer, x *uint64) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = b.u64
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUint64s(b *buffer, x *[]uint64) error {
|
||||
if b.typ == 2 {
|
||||
data := b.data
|
||||
// Packed encoding
|
||||
tmp := make([]uint64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, u)
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var u uint64
|
||||
if err := decodeUint64(b, &u); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeString(b *buffer, x *string) error {
|
||||
if err := checkType(b, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = string(b.data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStrings(b *buffer, x *[]string) error {
|
||||
var s string
|
||||
if err := decodeString(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeBool(b *buffer, x *bool) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
if int64(b.u64) == 0 {
|
||||
*x = false
|
||||
} else {
|
||||
*x = true
|
||||
}
|
||||
return nil
|
||||
}
|
|
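A hedged, standalone sketch of the varint wire format used above (the round-tripped value is arbitrary, and the snippet is not part of the vendored file): it mirrors the encodeVarint/decodeVarint logic and reproduces the `ac 02` example from the protobuf encoding docs.

```Go
package main

import "fmt"

// encodeVarint appends x in little-endian base-128 groups,
// setting the high bit on every byte except the last.
func encodeVarint(dst []byte, x uint64) []byte {
	for x >= 128 {
		dst = append(dst, byte(x)|0x80)
		x >>= 7
	}
	return append(dst, byte(x))
}

// decodeVarint reverses the process, accumulating 7 bits per byte
// (input assumed well-formed for this sketch).
func decodeVarint(data []byte) (uint64, []byte) {
	var u uint64
	for i := 0; ; i++ {
		u |= uint64(data[i]&0x7F) << uint(7*i)
		if data[i]&0x80 == 0 {
			return u, data[i+1:]
		}
	}
}

func main() {
	buf := encodeVarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02, as in the protobuf encoding docs
	v, _ := decodeVarint(buf)
	fmt.Println(v) // 300
}
```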
@@ -0,0 +1,178 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Implements methods to remove frames from profiles.

package profile

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	reservedNames = []string{"(anonymous namespace)", "operator()"}
	bracketRx     = func() *regexp.Regexp {
		var quotedNames []string
		for _, name := range append(reservedNames, "(") {
			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
		}
		return regexp.MustCompile(strings.Join(quotedNames, "|"))
	}()
)

// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
	// Account for leading '.' on the PPC ELF v1 ABI.
	funcName := strings.TrimPrefix(f, ".")
	// Account for unsimplified names -- try to remove the argument list by trimming
	// starting from the first '(', but skipping reserved names that have '('.
	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
		foundReserved := false
		for _, res := range reservedNames {
			if funcName[ind[0]:ind[1]] == res {
				foundReserved = true
				break
			}
		}
		if !foundReserved {
			funcName = funcName[:ind[0]]
			break
		}
	}
	return funcName
}

// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
	prune := make(map[uint64]bool)
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		var i int
		for i = len(loc.Line) - 1; i >= 0; i-- {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					if keepRx == nil || !keepRx.MatchString(funcName) {
						break
					}
				}
			}
		}

		if i >= 0 {
			// Found matching entry to prune.
			pruneBeneath[loc.ID] = true

			// Remove the matching location.
			if i == len(loc.Line)-1 {
				// Matched the top entry: prune the whole location.
				prune[loc.ID] = true
			} else {
				loc.Line = loc.Line[i+1:]
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the root to the leaves to find the prune location.
		// Do not prune frames before the first user frame, to avoid
		// pruning everything.
		foundUser := false
		for i := len(sample.Location) - 1; i >= 0; i-- {
			id := sample.Location[i].ID
			if !prune[id] && !pruneBeneath[id] {
				foundUser = true
				continue
			}
			if !foundUser {
				continue
			}
			if prune[id] {
				sample.Location = sample.Location[i+1:]
				break
			}
			if pruneBeneath[id] {
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}

// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
	var keep, drop *regexp.Regexp
	var err error

	if p.DropFrames != "" {
		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
		}
		if p.KeepFrames != "" {
			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
			}
		}
		p.Prune(drop, keep)
	}
	return nil
}

// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from the Prune method.
//
// A sample contains a Location stack of [A,B,C,B,D], where D is the top frame and there is no inlining.
//
// PruneFrom(A) returns [A,B,C,B,D] because there is no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		for i := 0; i < len(loc.Line); i++ {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					// Found matching entry to prune.
					pruneBeneath[loc.ID] = true
					loc.Line = loc.Line[i:]
					break
				}
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the bottom leaf to the root to find the prune location.
		for i, loc := range sample.Location {
			if pruneBeneath[loc.ID] {
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}
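RemoveUninteresting wraps the user-supplied DropFrames/KeepFrames expressions in `^(...)$`, so a frame is dropped only when its whole simplified name matches. A small, hedged sketch of that anchoring (the frame names are invented for illustration):

```Go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A DropFrames-style expression, anchored exactly as RemoveUninteresting does.
	dropFrames := `runtime\..*|gc`
	drop := regexp.MustCompile("^(" + dropFrames + ")$")

	for _, name := range []string{"runtime.mallocgc", "gc", "mygc", "main.run"} {
		fmt.Printf("%-18s matches: %v\n", name, drop.MatchString(name))
	}
	// runtime.mallocgc and gc match; mygc and main.run do not,
	// because the anchors force a whole-name match.
}
```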
@@ -1,3 +0,0 @@
.idea/
.test/
examples/_*

@@ -1,56 +0,0 @@
# Version v1.4.7-v1.4.8

* Documentation updates.
* Small linter cleanups.
* Added example in test.

# Version v1.4.6

* Document the usage of Cleanup when re-reading a file (issue #18, thanks to @lesovsky).
* Add example directories with example and tests for issues.

# Version v1.4.4-v1.4.5

* Fix of checksum problem because of forced tag. No changes to the code.

# Version v1.4.1

* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".

# Version v1.4.0

* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".

# Version v1.3.1

* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
  fixes upstream issue 93.

# Version v1.3.0

* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
  to Line struct".

# Version v1.2.1

* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
  code in readme".
* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
  to comment wording".
* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
  spurious newlines from log messages".

# Version v1.2.0

* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
  problem for never return the last line if it's not followed by a newline".
* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
  deprecated os.SEEK consts". The changes bumped the minimal supported Go
  release to 1.9.

# Version v1.1.0

* Migration to go modules.
* Release of the master branch of the dormant upstream, because it contains
  fixes and improvements not present in the tagged release.

@@ -1,19 +0,0 @@
FROM golang

RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
ADD . $GOPATH/src/github.com/nxadm/tail/

# expecting to fetch dependencies successfully.
RUN go get -v github.com/nxadm/tail

# expecting to run the test successfully.
RUN go test -v github.com/nxadm/tail

# expecting to install successfully
RUN go install -v github.com/nxadm/tail
RUN go install -v github.com/nxadm/tail/cmd/gotail

RUN $GOPATH/bin/gotail -h || true

ENV PATH $GOPATH/bin:$PATH
CMD ["gotail"]

@@ -1,21 +0,0 @@
# The MIT License (MIT)

# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,44 +0,0 @@
[](https://pkg.go.dev/github.com/nxadm/tail)

# tail functionality in Go

nxadm/tail provides a Go library that emulates the features of the BSD `tail`
program. The library comes with full support for truncation/move detection as
it is designed to work with log rotation tools. The library works on all
operating systems supported by Go, including POSIX systems like Linux and
*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.

A simple example:

```Go
// Create a tail
t, err := tail.TailFile(
	"/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true})
if err != nil {
	panic(err)
}

// Print the text of each received line
for line := range t.Lines {
	fmt.Println(line.Text)
}
```

See [API documentation](https://pkg.go.dev/github.com/nxadm/tail).

## Installing

    go get github.com/nxadm/tail/...

## History

This project is an active, drop-in replacement for the
[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at
[hpcloud](https://github.com/hpcloud/tail). Besides
[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6),
nxadm/tail continues the development by keeping up to date with the Go toolchain
(e.g. go modules) and dependencies, completing the documentation, adding features
and fixing bugs.

## Examples

Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples).

@@ -1,7 +0,0 @@
Copyright (C) 2013 99designs

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -1,97 +0,0 @@
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter

import (
	"time"
)

type LeakyBucket struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
	Now          func() time.Time
}

func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
	bucket := LeakyBucket{
		Size:         size,
		Fill:         0,
		LeakInterval: leakInterval,
		Now:          time.Now,
		Lastupdate:   time.Now(),
	}

	return &bucket
}

func (b *LeakyBucket) updateFill() {
	now := b.Now()
	if b.Fill > 0 {
		elapsed := now.Sub(b.Lastupdate)

		b.Fill -= float64(elapsed) / float64(b.LeakInterval)
		if b.Fill < 0 {
			b.Fill = 0
		}
	}
	b.Lastupdate = now
}

// Pour attempts to add amount to the bucket after applying any leakage
// since the last update. It reports false when the addition would
// overflow the bucket, i.e. when the rate limit is hit.
func (b *LeakyBucket) Pour(amount uint16) bool {
	b.updateFill()

	newfill := b.Fill + float64(amount)

	if newfill > float64(b.Size) {
		return false
	}

	b.Fill = newfill

	return true
}

// DrainedAt returns the time at which this bucket will be completely drained.
func (b *LeakyBucket) DrainedAt() time.Time {
	return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}

// TimeToDrain returns the duration until this bucket is completely drained.
func (b *LeakyBucket) TimeToDrain() time.Duration {
	return b.DrainedAt().Sub(b.Now())
}

func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
	return b.Now().Sub(b.Lastupdate)
}

type LeakyBucketSer struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
}

func (b *LeakyBucket) Serialise() *LeakyBucketSer {
	bucket := LeakyBucketSer{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
	}

	return &bucket
}

func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
	bucket := LeakyBucket{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
		Now:          time.Now,
	}

	return &bucket
}
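A hedged usage sketch of the leaky bucket above (sizes and intervals chosen arbitrarily): a bucket of size 10 that leaks one unit per 100ms admits a burst of ten pours and then rejects further ones until it has drained a little.

```Go
package main

import (
	"fmt"
	"time"

	"github.com/nxadm/tail/ratelimiter"
)

func main() {
	// One unit leaks every 100ms, so sustained throughput is roughly 10 pours/sec.
	lb := ratelimiter.NewLeakyBucket(10, 100*time.Millisecond)

	for i := 1; i <= 12; i++ {
		fmt.Printf("pour %2d allowed: %v\n", i, lb.Pour(1))
	}
	// The first ten pours fill the bucket; the remaining ones are
	// rejected until enough time passes for the bucket to leak.
	fmt.Println("time to drain:", lb.TimeToDrain())
}
```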
@@ -1,60 +0,0 @@
package ratelimiter

import (
	"errors"
	"time"
)

const (
	GC_SIZE   int           = 100
	GC_PERIOD time.Duration = 60 * time.Second
)

type Memory struct {
	store           map[string]LeakyBucket
	lastGCCollected time.Time
}

func NewMemory() *Memory {
	m := new(Memory)
	m.store = make(map[string]LeakyBucket)
	m.lastGCCollected = time.Now()
	return m
}

func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
	bucket, ok := m.store[key]
	if !ok {
		return nil, errors.New("miss")
	}

	return &bucket, nil
}

func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
	if len(m.store) > GC_SIZE {
		m.GarbageCollect()
	}

	m.store[key] = bucket

	return nil
}

func (m *Memory) GarbageCollect() {
	now := time.Now()

	// rate limit GC to once per minute
	if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
		for key, bucket := range m.store {
			// if the bucket is drained, then GC
			if bucket.DrainedAt().Unix() < now.Unix() {
				delete(m.store, key)
			}
		}

		m.lastGCCollected = now
	}
}

@@ -1,6 +0,0 @@
package ratelimiter

// Storage is the interface implemented by LeakyBucket backends,
// e.g. the in-memory store above.
type Storage interface {
	GetBucketFor(string) (*LeakyBucket, error)
	SetBucketFor(string, LeakyBucket) error
}

@@ -1,455 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

// nxadm/tail provides a Go library that emulates the features of the BSD `tail`
// program. The library comes with full support for truncation/move detection as
// it is designed to work with log rotation tools. The library works on all
// operating systems supported by Go, including POSIX systems like Linux and
// *BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
package tail

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/nxadm/tail/ratelimiter"
	"github.com/nxadm/tail/util"
	"github.com/nxadm/tail/watch"
	"gopkg.in/tomb.v1"
)

var (
	// ErrStop is returned when the tail of a file has been marked to be stopped.
	ErrStop = errors.New("tail should now stop")
)

type Line struct {
	Text     string    // The contents of the file
	Num      int       // The line number
	SeekInfo SeekInfo  // SeekInfo
	Time     time.Time // Present time
	Err      error     // Error from tail
}

// Deprecated: this function is no longer used internally and it has little or no
// use in the API. As such, it will be removed from the API in a future major
// release.
//
// NewLine returns a pointer to a Line struct.
func NewLine(text string, lineNum int) *Line {
	return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
}

// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
type SeekInfo struct {
	Offset int64
	Whence int
}

type logger interface {
	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})
	Fatalln(v ...interface{})
	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
	Panicln(v ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// Config is used to specify how a file must be tailed.
type Config struct {
	// File-specific
	Location  *SeekInfo // Tail from this location. If nil, start at the beginning of the file
	ReOpen    bool      // Reopen recreated files (tail -F)
	MustExist bool      // Fail early if the file does not exist
	Poll      bool      // Poll for file changes instead of using the default inotify
	Pipe      bool      // The file is a named pipe (mkfifo)

	// Generic IO
	Follow      bool // Continue looking for new lines (tail -f)
	MaxLineSize int  // If non-zero, split longer lines into multiple lines

	// Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
	RateLimiter *ratelimiter.LeakyBucket

	// Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
	// To disable logging, set it to tail.DiscardingLogger
	Logger logger
}

type Tail struct {
	Filename string     // The filename
	Lines    chan *Line // A consumable channel of *Line
	Config              // Tail.Configuration

	file    *os.File
	reader  *bufio.Reader
	lineNum int

	watcher watch.FileWatcher
	changes *watch.FileChanges

	tomb.Tomb // provides: Done, Kill, Dying

	lk sync.Mutex
}

var (
	// DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
	// DiscardingLogger can be used to disable logging output
	DiscardingLogger = log.New(ioutil.Discard, "", 0)
)

// TailFile begins tailing the file and returns a pointer to a Tail struct
// and an error. An output stream is made available via the Tail.Lines
// channel (e.g. to be looped and printed). To handle errors during tailing,
// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
// method on the returned *Tail.
func TailFile(filename string, config Config) (*Tail, error) {
	if config.ReOpen && !config.Follow {
		util.Fatal("cannot set ReOpen without Follow.")
	}

	t := &Tail{
		Filename: filename,
		Lines:    make(chan *Line),
		Config:   config,
	}

	// when Logger was not specified in config, use default logger
	if t.Logger == nil {
		t.Logger = DefaultLogger
	}

	if t.Poll {
		t.watcher = watch.NewPollingFileWatcher(filename)
	} else {
		t.watcher = watch.NewInotifyFileWatcher(filename)
	}

	if t.MustExist {
		var err error
		t.file, err = OpenFile(t.Filename)
		if err != nil {
			return nil, err
		}
	}

	go t.tailFileSync()

	return t, nil
}

// Tell returns the file's current position, like stdio's ftell(), and an error.
// Beware that this value may not be completely accurate because one line from
// the chan(tail.Lines) may have been read already.
func (tail *Tail) Tell() (offset int64, err error) {
	if tail.file == nil {
		return
	}
	offset, err = tail.file.Seek(0, io.SeekCurrent)
	if err != nil {
		return
	}

	tail.lk.Lock()
	defer tail.lk.Unlock()
	if tail.reader == nil {
		return
	}

	offset -= int64(tail.reader.Buffered())
	return
}

// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
	tail.Kill(nil)
	return tail.Wait()
}

// StopAtEOF stops tailing as soon as the end of the file is reached. The
// function returns an error, if any.
func (tail *Tail) StopAtEOF() error {
	tail.Kill(errStopAtEOF)
	return tail.Wait()
}

var errStopAtEOF = errors.New("tail: stop at eof")

func (tail *Tail) close() {
	close(tail.Lines)
	tail.closeFile()
}

func (tail *Tail) closeFile() {
	if tail.file != nil {
		tail.file.Close()
		tail.file = nil
	}
}

func (tail *Tail) reopen() error {
	tail.closeFile()
	tail.lineNum = 0
	for {
		var err error
		tail.file, err = OpenFile(tail.Filename)
		if err != nil {
			if os.IsNotExist(err) {
				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
					if err == tomb.ErrDying {
						return err
					}
					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
				}
				continue
			}
			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
		}
		break
	}
	return nil
}

func (tail *Tail) readLine() (string, error) {
	tail.lk.Lock()
	line, err := tail.reader.ReadString('\n')
	tail.lk.Unlock()
	if err != nil {
		// Note ReadString "returns the data read before the error" in
		// case of an error, including EOF, so we return it as is. The
		// caller is expected to process it if err is EOF.
		return line, err
	}

	line = strings.TrimRight(line, "\n")

	return line, err
}

func (tail *Tail) tailFileSync() {
	defer tail.Done()
	defer tail.close()

	if !tail.MustExist {
		// deferred first open.
		err := tail.reopen()
		if err != nil {
			if err != tomb.ErrDying {
				tail.Kill(err)
			}
			return
		}
	}

	// Seek to requested location on first open of the file.
	if tail.Location != nil {
		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
		if err != nil {
			tail.Killf("Seek error on %s: %s", tail.Filename, err)
			return
		}
	}

	tail.openReader()

	// Read line by line.
	for {
		// do not seek in named pipes
		if !tail.Pipe {
			// grab the position in case we need to back up in the event of a half-line
			if _, err := tail.Tell(); err != nil {
				tail.Kill(err)
				return
			}
		}

		line, err := tail.readLine()

		// Process `line` even if err is EOF.
		if err == nil {
			cooloff := !tail.sendLine(line)
			if cooloff {
				// Wait a second before seeking till the end of
				// file when rate limit is reached.
				msg := "Too much log activity; waiting a second before resuming tailing"
				offset, _ := tail.Tell()
				tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
				select {
				case <-time.After(time.Second):
				case <-tail.Dying():
					return
				}
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}
		} else if err == io.EOF {
			if !tail.Follow {
				if line != "" {
					tail.sendLine(line)
				}
				return
			}

			if tail.Follow && line != "" {
				tail.sendLine(line)
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}

			// When EOF is reached, wait for more data to become
			// available. Wait strategy is based on the `tail.watcher`
			// implementation (inotify or polling).
			err := tail.waitForChanges()
			if err != nil {
				if err != ErrStop {
					tail.Kill(err)
				}
				return
			}
		} else {
			// non-EOF error
			tail.Killf("Error reading %s: %s", tail.Filename, err)
			return
		}

		select {
		case <-tail.Dying():
			if tail.Err() == errStopAtEOF {
				continue
			}
			return
		default:
		}
	}
}

// waitForChanges waits until the file has been appended, deleted,
// moved or truncated. When moved or deleted - the file will be
// reopened if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error {
	if tail.changes == nil {
		pos, err := tail.file.Seek(0, io.SeekCurrent)
		if err != nil {
			return err
		}
		tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
		if err != nil {
			return err
		}
	}

	select {
	case <-tail.changes.Modified:
		return nil
	case <-tail.changes.Deleted:
		tail.changes = nil
		if tail.ReOpen {
			// XXX: we must not log from a library.
			tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
			if err := tail.reopen(); err != nil {
				return err
			}
			tail.Logger.Printf("Successfully reopened %s", tail.Filename)
			tail.openReader()
			return nil
		}
		tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
		return ErrStop
	case <-tail.changes.Truncated:
		// Always reopen truncated files (Follow is true)
		tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
		if err := tail.reopen(); err != nil {
			return err
		}
		tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
		tail.openReader()
		return nil
	case <-tail.Dying():
		return ErrStop
	}
}

func (tail *Tail) openReader() {
	tail.lk.Lock()
	if tail.MaxLineSize > 0 {
		// add 2 to account for newline characters
		tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
	} else {
		tail.reader = bufio.NewReader(tail.file)
	}
	tail.lk.Unlock()
}

func (tail *Tail) seekEnd() error {
	return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
}

func (tail *Tail) seekTo(pos SeekInfo) error {
	_, err := tail.file.Seek(pos.Offset, pos.Whence)
	if err != nil {
		return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
	}
	// Reset the read buffer whenever the file is re-seek'ed
	tail.reader.Reset(tail.file)
	return nil
}

// sendLine sends the line(s) to the Lines channel, splitting longer lines
// if necessary. It returns false if the rate limit is reached.
func (tail *Tail) sendLine(line string) bool {
	now := time.Now()
	lines := []string{line}

	// Split longer lines
	if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
		lines = util.PartitionString(line, tail.MaxLineSize)
	}

	for _, line := range lines {
		tail.lineNum++
		offset, _ := tail.Tell()
		select {
		case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
		case <-tail.Dying():
			return true
		}
	}

	if tail.Config.RateLimiter != nil {
		ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
		if !ok {
			tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
				tail.Filename)
			return false
		}
	}

	return true
}

// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. The Linux kernel may not
// automatically remove inotify watches after the process exits.
// If you plan to re-read a file, don't call Cleanup in between.
func (tail *Tail) Cleanup() {
	watch.Cleanup(tail.Filename)
}
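Building on the Config fields documented above, a hedged usage sketch (the log path is invented) that follows a rotated log starting from the current end of the file, `tail -F` style, instead of the default start-of-file position:

```Go
package main

import (
	"fmt"
	"io"

	"github.com/nxadm/tail"
)

func main() {
	// Follow the file and reopen it on rotation (tail -F semantics),
	// starting from the current end of the file.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow:   true,
		ReOpen:   true,
		Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd},
	})
	if err != nil {
		panic(err)
	}

	for line := range t.Lines {
		fmt.Println(line.Num, line.Text)
	}
	// After Lines is closed, surface any tailing error, as the
	// TailFile doc comment advises.
	if err := t.Wait(); err != nil {
		fmt.Println("tail stopped:", err)
	}
}
```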
@@ -1,17 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build !windows

package tail

import (
	"os"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies an os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
	return os.Open(name)
}

@@ -1,19 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package tail

import (
	"os"

	"github.com/nxadm/tail/winfile"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies an os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
	return winfile.OpenFile(name, os.O_RDONLY, 0)
}

@@ -1,49 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package util

import (
	"fmt"
	"log"
	"os"
	"runtime/debug"
)

type Logger struct {
	*log.Logger
}

var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}

// Fatal is like panic except it displays only the current goroutine's stack.
func Fatal(format string, v ...interface{}) {
	// https://github.com/nxadm/log/blob/master/log.go#L45
	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
	os.Exit(1)
}

// PartitionString partitions the string into chunks of given size,
// with the last chunk of variable size.
func PartitionString(s string, chunkSize int) []string {
	if chunkSize <= 0 {
		panic("invalid chunkSize")
	}
	length := len(s)
	chunks := 1 + length/chunkSize
	start := 0
	end := chunkSize
	parts := make([]string, 0, chunks)
	for {
		if end > length {
			end = length
		}
		parts = append(parts, s[start:end])
		if end == length {
			break
		}
		start, end = end, end+chunkSize
	}
	return parts
}
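A brief, hedged illustration of PartitionString's chunking (the inputs are invented): an 11-byte string split into 4-byte chunks yields two full chunks plus a shorter tail.

```Go
package main

import (
	"fmt"

	"github.com/nxadm/tail/util"
)

func main() {
	// 11 bytes, chunk size 4: two full chunks and one 3-byte remainder.
	parts := util.PartitionString("hello world", 4)
	fmt.Printf("%q\n", parts) // ["hell" "o wo" "rld"]
}
```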
@@ -1,37 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
package watch

type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}

func NewFileChanges() *FileChanges {
	return &FileChanges{
		make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
}

func (fc *FileChanges) NotifyModified() {
	sendOnlyIfEmpty(fc.Modified)
}

func (fc *FileChanges) NotifyTruncated() {
	sendOnlyIfEmpty(fc.Truncated)
}

func (fc *FileChanges) NotifyDeleted() {
	sendOnlyIfEmpty(fc.Deleted)
}

// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}
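The sendOnlyIfEmpty pattern above combines a capacity-1 channel with a non-blocking send, so a burst of notifications collapses into a single pending signal. A standalone, hedged sketch of the same idea:

```Go
package main

import "fmt"

func main() {
	// Capacity 1: at most one notification can be pending.
	events := make(chan bool, 1)

	notify := func() {
		select {
		case events <- true: // delivered only if nothing is pending
		default: // otherwise dropped; the pending signal already covers it
		}
	}

	// Three rapid-fire notifications before the consumer looks...
	notify()
	notify()
	notify()

	// ...are observed as exactly one pending signal.
	fmt.Println(len(events)) // 1
}
```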
@@ -1,136 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
	"gopkg.in/tomb.v1"
)

// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
	Filename string
	Size     int64
}

func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
	fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
	return fw
}

func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	err := WatchCreate(fw.Filename)
	if err != nil {
		return err
	}
	defer RemoveWatchCreate(fw.Filename)

	// Do a real check now as the file might have been created before
	// the watch above was established.
	if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
		// file exists, or stat returned an error.
		return err
	}

	events := Events(fw.Filename)

	for {
		select {
		case evt, ok := <-events:
			if !ok {
				return fmt.Errorf("inotify watcher has been closed")
			}
			evtName, err := filepath.Abs(evt.Name)
			if err != nil {
				return err
			}
			fwFilename, err := filepath.Abs(fw.Filename)
			if err != nil {
				return err
			}
			if evtName == fwFilename {
				return nil
			}
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	err := Watch(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	fw.Size = pos

	go func() {
		events := Events(fw.Filename)

		for {
			prevSize := fw.Size

			var evt fsnotify.Event
			var ok bool

			select {
			case evt, ok = <-events:
				if !ok {
					RemoveWatch(fw.Filename)
					return
				}
			case <-t.Dying():
				RemoveWatch(fw.Filename)
				return
			}

			switch {
			case evt.Op&fsnotify.Remove == fsnotify.Remove:
				fallthrough

			case evt.Op&fsnotify.Rename == fsnotify.Rename:
				RemoveWatch(fw.Filename)
				changes.NotifyDeleted()
				return

			// With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
			case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
				fallthrough

			case evt.Op&fsnotify.Write == fsnotify.Write:
				fi, err := os.Stat(fw.Filename)
				if err != nil {
					if os.IsNotExist(err) {
						RemoveWatch(fw.Filename)
						changes.NotifyDeleted()
						return
					}
					// XXX: report this error back to the user
					util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
				}
				fw.Size = fi.Size()

				if prevSize > 0 && prevSize > fw.Size {
					changes.NotifyTruncated()
				} else {
					changes.NotifyModified()
				}
				prevSize = fw.Size
			}
		}
	}()

	return changes, nil
}
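BlockUntilExists uses a classic race-avoidance ordering: establish the watch first, then stat, so a file created between the two steps cannot be missed. A hedged, standalone sketch of the same ordering directly against fsnotify (the path is invented, and this simplifies away the tracker's shared-watcher bookkeeping):

```Go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func blockUntilExists(path string) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()

	// 1. Watch the parent directory first...
	if err := w.Add(filepath.Dir(path)); err != nil {
		return err
	}
	// 2. ...then check: if the file already exists, the watch was
	// redundant, but no creation event can have been missed.
	if _, err := os.Stat(path); err == nil {
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}
	// 3. Otherwise wait for the Create event for our file.
	for {
		select {
		case evt, ok := <-w.Events:
			if !ok {
				return fmt.Errorf("watcher closed")
			}
			if evt.Op&fsnotify.Create != 0 && filepath.Clean(evt.Name) == filepath.Clean(path) {
				return nil
			}
		case err := <-w.Errors:
			return err
		}
	}
}

func main() {
	fmt.Println(blockUntilExists("/tmp/demo.log"))
}
```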
@@ -1,249 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"log"
	"os"
	"path/filepath"
	"sync"
	"syscall"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
)

type InotifyTracker struct {
	mux       sync.Mutex
	watcher   *fsnotify.Watcher
	chans     map[string]chan fsnotify.Event
	done      map[string]chan bool
	watchNums map[string]int
	watch     chan *watchInfo
	remove    chan *watchInfo
	error     chan error
}

type watchInfo struct {
	op    fsnotify.Op
	fname string
}

func (this *watchInfo) isCreate() bool {
	return this.op == fsnotify.Create
}

var (
	// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
	shared *InotifyTracker

	// these are used to ensure the shared InotifyTracker is run exactly once
	once  = sync.Once{}
	goRun = func() {
		shared = &InotifyTracker{
			mux:       sync.Mutex{},
			chans:     make(map[string]chan fsnotify.Event),
			done:      make(map[string]chan bool),
			watchNums: make(map[string]int),
			watch:     make(chan *watchInfo),
			remove:    make(chan *watchInfo),
			error:     make(chan error),
		}
		go shared.run()
	}

	logger = log.New(os.Stderr, "", log.LstdFlags)
)

// Watch signals the run goroutine to begin watching the input filename
func Watch(fname string) error {
	return watch(&watchInfo{
		fname: fname,
	})
}

// WatchCreate signals the run goroutine to begin watching the input filename.
// If you use WatchCreate, do not call Cleanup; call RemoveWatchCreate instead.
func WatchCreate(fname string) error {
	return watch(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}

func watch(winfo *watchInfo) error {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.watch <- winfo
	return <-shared.error
}

// RemoveWatch signals the run goroutine to remove the watch for the input filename
func RemoveWatch(fname string) error {
	return remove(&watchInfo{
		fname: fname,
	})
}

// RemoveWatchCreate signals the run goroutine to remove the watch for the input filename
func RemoveWatchCreate(fname string) error {
	return remove(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}

func remove(winfo *watchInfo) error {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.mux.Lock()
	done := shared.done[winfo.fname]
	if done != nil {
		delete(shared.done, winfo.fname)
		close(done)
	}
	shared.mux.Unlock()

	shared.remove <- winfo
	return <-shared.error
}

// Events returns a channel to which FileEvents corresponding to the input filename
// will be sent. This channel will be closed when removeWatch is called on this
// filename.
func Events(fname string) <-chan fsnotify.Event {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	return shared.chans[fname]
}

// Cleanup removes the watch for the input filename if necessary.
func Cleanup(fname string) error {
	return RemoveWatch(fname)
}

// addWatch adds an fsnotify watch for the input filename, creating
// a new Watcher if the previous Watcher was closed.
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	if shared.chans[winfo.fname] == nil {
		shared.chans[winfo.fname] = make(chan fsnotify.Event)
	}
	if shared.done[winfo.fname] == nil {
		shared.done[winfo.fname] = make(chan bool)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}

	var err error
	// only add the inotify watch if the file is not already watched
	if shared.watchNums[fname] == 0 {
		err = shared.watcher.Add(fname)
	}
	if err == nil {
		shared.watchNums[fname]++
	}
	return err
}

// removeWatch removes the fsnotify watch for the input filename and closes the
// corresponding events channel.
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
	shared.mux.Lock()

	ch := shared.chans[winfo.fname]
	if ch != nil {
		delete(shared.chans, winfo.fname)
		close(ch)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}
	shared.watchNums[fname]--
	watchNum := shared.watchNums[fname]
	if watchNum == 0 {
		delete(shared.watchNums, fname)
	}
	shared.mux.Unlock()

	var err error
	// If we were the last ones to watch this file, unsubscribe from inotify.
	// This needs to happen after releasing the lock because fsnotify waits
	// synchronously for the kernel to acknowledge the removal of the watch
	// for this file, which causes us to deadlock if we still held the lock.
	if watchNum == 0 {
		err = shared.watcher.Remove(fname)
	}

	return err
}

// sendEvent sends the input event to the appropriate Tail.
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
	name := filepath.Clean(event.Name)

	shared.mux.Lock()
	ch := shared.chans[name]
	done := shared.done[name]
	shared.mux.Unlock()

	if ch != nil && done != nil {
		select {
		case ch <- event:
		case <-done:
		}
	}
}

// run starts the goroutine in which the shared struct reads events from its
// Watcher's Event channel and sends the events to the appropriate Tail.
func (shared *InotifyTracker) run() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		util.Fatal("failed to create Watcher")
	}
	shared.watcher = watcher

	for {
		select {
		case winfo := <-shared.watch:
			shared.error <- shared.addWatch(winfo)

		case winfo := <-shared.remove:
			shared.error <- shared.removeWatch(winfo)

		case event, open := <-shared.watcher.Events:
			if !open {
				return
			}
			shared.sendEvent(event)

		case err, open := <-shared.watcher.Errors:
			if !open {
				return
			} else if err != nil {
				sysErr, ok := err.(*os.SyscallError)
				if !ok || sysErr.Err != syscall.EINTR {
					logger.Printf("Error in Watcher Error channel: %s", err)
				}
			}
		}
	}
}

@@ -1,119 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"os"
	"runtime"
	"time"

	"github.com/nxadm/tail/util"
	"gopkg.in/tomb.v1"
)

// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
	Filename string
	Size     int64
}

func NewPollingFileWatcher(filename string) *PollingFileWatcher {
	fw := &PollingFileWatcher{filename, 0}
	return fw
}

var POLL_DURATION time.Duration

func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	for {
		if _, err := os.Stat(fw.Filename); err == nil {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
		select {
		case <-time.After(POLL_DURATION):
			continue
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	origFi, err := os.Stat(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	var prevModTime time.Time

	// XXX: use tomb.Tomb to cleanly manage these goroutines. Replace
	// the fatal (below) with tomb's Kill.

	fw.Size = pos

	go func() {
		prevSize := fw.Size
		for {
			select {
			case <-t.Dying():
				return
			default:
			}

			time.Sleep(POLL_DURATION)
			fi, err := os.Stat(fw.Filename)
			if err != nil {
				// Windows cannot delete a file if a handle is still open (tail keeps one open)
				// so it gives access denied to anything trying to read it until all handles are released.
				if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
					// File does not exist (has been deleted).
					changes.NotifyDeleted()
					return
				}

				// XXX: report this error back to the user
				util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
			}

			// File got moved/renamed?
			if !os.SameFile(origFi, fi) {
				changes.NotifyDeleted()
				return
			}

			// File got truncated?
			fw.Size = fi.Size()
			if prevSize > 0 && prevSize > fw.Size {
				changes.NotifyTruncated()
				prevSize = fw.Size
				continue
			}
			// File got bigger?
			if prevSize > 0 && prevSize < fw.Size {
				changes.NotifyModified()
				prevSize = fw.Size
				continue
			}
			prevSize = fw.Size

			// File was appended to (changed)?
			modTime := fi.ModTime()
			if modTime != prevModTime {
				prevModTime = modTime
				changes.NotifyModified()
			}
		}
	}()

	return changes, nil
}

func init() {
	POLL_DURATION = 250 * time.Millisecond
}

@@ -1,21 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import "gopkg.in/tomb.v1"

// FileWatcher monitors file-level events.
type FileWatcher interface {
	// BlockUntilExists blocks until the file comes into existence.
	BlockUntilExists(*tomb.Tomb) error

	// ChangeEvents reports on changes to a file, be it modification,
	// deletion, renames or truncations. The returned FileChanges group
	// of channels will be closed (and thus become unusable) after a
	// deletion or truncation event.
	// In order to properly report truncations, ChangeEvents requires
	// the caller to pass their current offset in the file.
	ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}

@@ -1,93 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// +build windows
|
||||
|
||||
package winfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// issue also described here
|
||||
//https://codereview.appspot.com/8203043/
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
|
||||
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
|
||||
if len(path) == 0 {
|
||||
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
|
||||
}
|
||||
pathp, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return syscall.InvalidHandle, err
|
||||
}
|
||||
var access uint32
|
||||
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
|
||||
case syscall.O_RDONLY:
|
||||
access = syscall.GENERIC_READ
|
||||
case syscall.O_WRONLY:
|
||||
access = syscall.GENERIC_WRITE
|
||||
case syscall.O_RDWR:
|
||||
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_CREAT != 0 {
|
||||
access |= syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_APPEND != 0 {
|
||||
access &^= syscall.GENERIC_WRITE
|
||||
access |= syscall.FILE_APPEND_DATA
|
||||
}
|
||||
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
|
||||
var sa *syscall.SecurityAttributes
|
||||
if mode&syscall.O_CLOEXEC == 0 {
|
||||
sa = makeInheritSa()
|
||||
}
|
||||
var createmode uint32
|
||||
switch {
|
||||
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
|
||||
createmode = syscall.CREATE_NEW
|
||||
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
|
||||
createmode = syscall.CREATE_ALWAYS
|
||||
case mode&syscall.O_CREAT == syscall.O_CREAT:
|
||||
createmode = syscall.OPEN_ALWAYS
|
||||
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
|
||||
createmode = syscall.TRUNCATE_EXISTING
|
||||
default:
|
||||
createmode = syscall.OPEN_EXISTING
|
||||
}
|
||||
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
return h, e
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
|
||||
func makeInheritSa() *syscall.SecurityAttributes {
|
||||
var sa syscall.SecurityAttributes
|
||||
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||
sa.InheritHandle = 1
|
||||
return &sa
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
|
||||
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
|
||||
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return os.NewFile(uintptr(r), name), nil
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
|
||||
func syscallMode(i os.FileMode) (o uint32) {
|
||||
o |= uint32(i.Perm())
|
||||
if i&os.ModeSetuid != 0 {
|
||||
o |= syscall.S_ISUID
|
||||
}
|
||||
if i&os.ModeSetgid != 0 {
|
||||
o |= syscall.S_ISGID
|
||||
}
|
||||
if i&os.ModeSticky != 0 {
|
||||
o |= syscall.S_ISVTX
|
||||
}
|
||||
// No mapping for Go's ModeTemporary (plan9 only).
|
||||
return
|
||||
}
|
|
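The POSIX-to-Win32 flag mapping above is easiest to see from the caller's side. A minimal, Windows-only sketch; the `github.com/nxadm/tail/winfile` import path is assumed from the copyright header:

```go
// +build windows

package main

import (
	"fmt"
	"os"

	"github.com/nxadm/tail/winfile" // import path assumed from the copyright header
)

func main() {
	// O_CREATE|O_APPEND maps to OPEN_ALWAYS plus FILE_APPEND_DATA access in
	// winfile.Open, and the share mode lets other processes delete or rotate
	// the file while we hold a handle.
	f, err := winfile.OpenFile("app.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	fmt.Fprintln(f, "hello from winfile")
}
```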
@ -1,24 +0,0 @@
language: go
go:
  - tip
  - 1.16.x
  - 1.15.x

cache:
  directories:
    - $GOPATH/pkg/mod

# allow internal package imports, necessary for forked repositories
go_import_path: github.com/onsi/ginkgo

install:
  - GO111MODULE="off" go get -v -t ./...
  - GO111MODULE="off" go get golang.org/x/tools/cmd/cover
  - GO111MODULE="off" go get github.com/onsi/gomega
  - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
  - export PATH=$GOPATH/bin:$PATH

script:
  - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum
  - go vet
  - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
@ -1,169 +0,0 @@
[Build status](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)

Jump to the [docs](https://onsi.github.io/ginkgo/) | [Chinese documentation (中文文档)](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!

If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).

# Ginkgo 2.0 Release Candidate is available!

An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch, and a changelog and migration guide are being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.

As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported, with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).

Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.

## TLDR
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library.

```go
Describe("the strings package", func() {
	Context("strings.Contains()", func() {
		When("the string contains the substring in the middle", func() {
			It("returns `true`", func() {
				Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue())
			})
		})
	})
})
```

## Feature List

- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)

- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
  - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
  - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
  - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
  - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
  - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.

- A comprehensive test runner that lets you:
  - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
  - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
  - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
  - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)

- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
  - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
  - `ginkgo -cover` runs your tests using Go's code coverage tool
  - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
  - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
  - `ginkgo -r` runs all test suites under the current directory
  - `ginkgo -v` prints out identifying information for each test just before it runs

  And much more: run `ginkgo help` for details!

  The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`

- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!

- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)

- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.

- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.

- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.

- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`.

- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.

- A modular architecture that lets you easily:
  - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
  - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo

## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library

Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)

## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework

Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)

## Getting Started

You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have them installed.

### Global installation
To install the Ginkgo command line interface:
```bash
go get -u github.com/onsi/ginkgo/ginkgo
```
Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information.

### Go module ["tools package"](https://github.com/golang/go/issues/25922):
Create (or update) a file called `tools/tools.go` with the following contents:
```go
// +build tools

package tools

import (
	_ "github.com/onsi/ginkgo/ginkgo"
)

// This file imports packages that are used when running go generate, or used
// during the development process but not otherwise depended on by built code.
```
The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`.
This approach allows the version of Ginkgo to be maintained under source control for reproducible results,
and is well suited to automated test pipelines.

### Bootstrapping
```bash
cd path/to/package/you/want/to/test

ginkgo bootstrap # set up a new ginkgo suite
ginkgo generate  # will create a sample test file. edit this file and add your tests then...

go test # to run your tests

ginkgo  # also runs your tests
```

## I'm new to Go: What are my testing options?

Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.

With that said, it's great to know what your options are :)

### What Go gives you out of the box

Testing is a first-class citizen in Go; however, Go's built-in testing primitives are somewhat limited: the [testing](https://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.

### Matcher libraries for Go's XUnit style tests

A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:

- [testify](https://github.com/stretchr/testify)
- [gocheck](https://labix.org/gocheck)

You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)

### BDD style testing frameworks

There are a handful of BDD-style testing frameworks written for Go. Here are a few:

- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
- [Goblin](https://github.com/franela/goblin)
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)

Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.

Go explore!

## License

Ginkgo is MIT-Licensed

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md)
@ -1,232 +0,0 @@
/*
Ginkgo accepts a number of configuration options.

These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)

You can also learn more via

	ginkgo help

or (I kid you not):

	go test -asdf
*/
package config

import (
	"flag"
	"fmt"
	"time"
)

const VERSION = "1.16.5"

type GinkgoConfigType struct {
	RandomSeed         int64
	RandomizeAllSpecs  bool
	RegexScansFilePath bool
	FocusStrings       []string
	SkipStrings        []string
	SkipMeasurements   bool
	FailOnPending      bool
	FailFast           bool
	FlakeAttempts      int
	EmitSpecProgress   bool
	DryRun             bool
	DebugParallel      bool

	ParallelNode  int
	ParallelTotal int
	SyncHost      string
	StreamHost    string
}

var GinkgoConfig = GinkgoConfigType{}

type DefaultReporterConfigType struct {
	NoColor           bool
	SlowSpecThreshold float64
	NoisyPendings     bool
	NoisySkippings    bool
	Succinct          bool
	Verbose           bool
	FullTrace         bool
	ReportPassed      bool
	ReportFile        string
}

var DefaultReporterConfig = DefaultReporterConfigType{}

func processPrefix(prefix string) string {
	if prefix != "" {
		prefix += "."
	}
	return prefix
}

type flagFunc func(string)

func (f flagFunc) String() string     { return "" }
func (f flagFunc) Set(s string) error { f(s); return nil }

func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
	prefix = processPrefix(prefix)
	flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
	flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
	flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
	flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
	flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")

	flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")

	flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.")
	flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.")

	flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")

	flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")

	flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")

	flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")

	if includeParallelFlags {
		flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
		flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
	}

	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
	flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
	flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
}

func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
	prefix = processPrefix(prefix)
	result := make([]string, 0)

	if ginkgo.RandomSeed > 0 {
		result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
	}

	if ginkgo.RandomizeAllSpecs {
		result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
	}

	if ginkgo.SkipMeasurements {
		result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
	}

	if ginkgo.FailOnPending {
		result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
	}

	if ginkgo.FailFast {
		result = append(result, fmt.Sprintf("--%sfailFast", prefix))
	}

	if ginkgo.DryRun {
		result = append(result, fmt.Sprintf("--%sdryRun", prefix))
	}

	for _, s := range ginkgo.FocusStrings {
		result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s))
	}

	for _, s := range ginkgo.SkipStrings {
		result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s))
	}

	if ginkgo.FlakeAttempts > 1 {
		result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
	}

	if ginkgo.EmitSpecProgress {
		result = append(result, fmt.Sprintf("--%sprogress", prefix))
	}

	if ginkgo.DebugParallel {
		result = append(result, fmt.Sprintf("--%sdebug", prefix))
	}

	if ginkgo.ParallelNode != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
	}

	if ginkgo.ParallelTotal != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
	}

	if ginkgo.StreamHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
	}

	if ginkgo.SyncHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
	}

	if ginkgo.RegexScansFilePath {
		result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
	}

	if reporter.NoColor {
		result = append(result, fmt.Sprintf("--%snoColor", prefix))
	}

	if reporter.SlowSpecThreshold > 0 {
		result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
	}

	if !reporter.NoisyPendings {
		result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
	}

	if !reporter.NoisySkippings {
		result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
	}

	if reporter.Verbose {
		result = append(result, fmt.Sprintf("--%sv", prefix))
	}

	if reporter.Succinct {
		result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
	}

	if reporter.FullTrace {
		result = append(result, fmt.Sprintf("--%strace", prefix))
	}

	if reporter.ReportPassed {
		result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
	}

	if reporter.ReportFile != "" {
		result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
	}

	return result
}

// flagFocus implements the -focus flag.
func flagFocus(arg string) {
	if arg != "" {
		GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg)
	}
}

// flagSkip implements the -skip flag.
func flagSkip(arg string) {
	if arg != "" {
		GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg)
	}
}
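The `Flags`/`BuildFlagArgs` pair round-trips: flags parsed into the package-level config can be serialized back into CLI arguments. A minimal sketch using only the exported names above (the v1 import path `github.com/onsi/ginkgo/config` is taken from the `VERSION` constant; the exact output depends on defaults such as the random seed):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register ginkgo's flags with a "ginkgo." prefix, parse some arguments,
	// then serialize the resulting config back into CLI arguments.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	config.Flags(fs, "ginkgo", true)
	_ = fs.Parse([]string{"--ginkgo.failFast", "--ginkgo.focus=integration"})

	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args) // e.g. [--ginkgo.seed=... --ginkgo.failFast --ginkgo.focus=integration ...]
}
```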
@ -1,201 +0,0 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/build"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
	"github.com/onsi/ginkgo/ginkgo/nodot"
)

func BuildBootstrapCommand() *Command {
	var (
		agouti, noDot, internal bool
		customBootstrapFile     string
	)
	flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
	flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
	flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
	flagSet.BoolVar(&internal, "internal", false, "If set, bootstrap will generate a bootstrap file that uses the regular package name")
	flagSet.StringVar(&customBootstrapFile, "template", "", "If specified, bootstrap will use the contents of the file passed as the bootstrap template")

	return &Command{
		Name:         "bootstrap",
		FlagSet:      flagSet,
		UsageCommand: "ginkgo bootstrap <FLAGS>",
		Usage: []string{
			"Bootstrap a test suite for the current package",
			"Accepts the following flags:",
		},
		Command: func(args []string, additionalArgs []string) {
			generateBootstrap(agouti, noDot, internal, customBootstrapFile)
			emitRCAdvertisement()
		},
	}
}

var bootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}
`

var agoutiBootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}

var agoutiDriver *agouti.WebDriver

var _ = BeforeSuite(func() {
	// Choose a WebDriver:

	agoutiDriver = agouti.PhantomJS()
	// agoutiDriver = agouti.Selenium()
	// agoutiDriver = agouti.ChromeDriver()

	Expect(agoutiDriver.Start()).To(Succeed())
})

var _ = AfterSuite(func() {
	Expect(agoutiDriver.Stop()).To(Succeed())
})
`

type bootstrapData struct {
	Package       string
	FormattedName string
	GinkgoImport  string
	GomegaImport  string
}

func getPackageAndFormattedName() (string, string, string) {
	path, err := os.Getwd()
	if err != nil {
		complainAndQuit("Could not get current working directory: \n" + err.Error())
	}

	dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
	dirName = strings.Replace(dirName, " ", "_", -1)

	pkg, err := build.ImportDir(path, 0)
	packageName := pkg.Name
	if err != nil {
		packageName = dirName
	}

	formattedName := prettifyPackageName(filepath.Base(path))
	return packageName, dirName, formattedName
}

func prettifyPackageName(name string) string {
	name = strings.Replace(name, "-", " ", -1)
	name = strings.Replace(name, "_", " ", -1)
	name = strings.Title(name)
	name = strings.Replace(name, " ", "", -1)
	return name
}

func determinePackageName(name string, internal bool) string {
	if internal {
		return name
	}

	return name + "_test"
}

func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func generateBootstrap(agouti, noDot, internal bool, customBootstrapFile string) {
	packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
	data := bootstrapData{
		Package:       determinePackageName(packageName, internal),
		FormattedName: formattedName,
		GinkgoImport:  `. "github.com/onsi/ginkgo"`,
		GomegaImport:  `. "github.com/onsi/gomega"`,
	}

	if noDot {
		data.GinkgoImport = `"github.com/onsi/ginkgo"`
		data.GomegaImport = `"github.com/onsi/gomega"`
	}

	targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
	if fileExists(targetFile) {
		fmt.Printf("%s already exists.\n\n", targetFile)
		os.Exit(1)
	} else {
		fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
	}

	f, err := os.Create(targetFile)
	if err != nil {
		complainAndQuit("Could not create file: " + err.Error())
		panic(err.Error())
	}
	defer f.Close()

	var templateText string
	if customBootstrapFile != "" {
		tpl, err := ioutil.ReadFile(customBootstrapFile)
		if err != nil {
			panic(err.Error())
		}
		templateText = string(tpl)
	} else if agouti {
		templateText = agoutiBootstrapText
	} else {
		templateText = bootstrapText
	}

	bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText)
	if err != nil {
		panic(err.Error())
	}

	buf := &bytes.Buffer{}
	bootstrapTemplate.Execute(buf, data)

	if noDot {
		contents, err := nodot.ApplyNoDot(buf.Bytes())
		if err != nil {
			complainAndQuit("Failed to import nodot declarations: " + err.Error())
		}
		fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
		buf = bytes.NewBuffer(contents)
	}

	buf.WriteTo(f)

	goFmt(targetFile)
}
@ -1,66 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"

	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
	"github.com/onsi/ginkgo/ginkgo/testrunner"
)

func BuildBuildCommand() *Command {
	commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
	interruptHandler := interrupthandler.NewInterruptHandler()
	builder := &SpecBuilder{
		commandFlags:     commandFlags,
		interruptHandler: interruptHandler,
	}

	return &Command{
		Name:         "build",
		FlagSet:      commandFlags.FlagSet,
		UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
		Usage: []string{
			"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
			"Accepts the following flags:",
		},
		Command: builder.BuildSpecs,
	}
}

type SpecBuilder struct {
	commandFlags     *RunWatchAndBuildCommandFlags
	interruptHandler *interrupthandler.InterruptHandler
}

func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
	r.commandFlags.computeNodes()

	suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)

	if len(suites) == 0 {
		complainAndQuit("Found no test suites")
	}

	passed := true
	for _, suite := range suites {
		runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil)
		fmt.Printf("Compiling %s...\n", suite.PackageName)

		path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
		err := runner.CompileTo(path)
		if err != nil {
			fmt.Println(err.Error())
			passed = false
		} else {
			fmt.Printf("    compiled %s.test\n", suite.PackageName)
		}
	}

	if passed {
		os.Exit(0)
	}
	os.Exit(1)
}
@ -1,123 +0,0 @@
package convert

import (
	"fmt"
	"go/ast"
	"strings"
	"unicode"
)

/*
 * Creates the ast.ValueSpec for a top level `var _ = ...` declaration
 */
func createVarUnderscoreBlock() *ast.ValueSpec {
	valueSpec := &ast.ValueSpec{}
	object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
	ident := &ast.Ident{Name: "_", Obj: object}
	valueSpec.Names = append(valueSpec.Names, ident)
	return valueSpec
}

/*
 * Creates a Describe("Testing with ginkgo", func() { }) node
 */
func createDescribeBlock() *ast.CallExpr {
	blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}

	fieldList := &ast.FieldList{}
	funcType := &ast.FuncType{Params: fieldList}
	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
	basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
	describeIdent := &ast.Ident{Name: "Describe"}
	return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
}

/*
 * Convenience function to return the name of the *testing.T param
 * for a Test function that will be rewritten. This is useful because
 * we will want to replace the usage of this named *testing.T inside the
 * body of the function with a GinkgoT.
 */
func namedTestingTArg(node *ast.FuncDecl) string {
	return node.Type.Params.List[0].Names[0].Name // *exhale*
}

/*
 * Convenience function to return the block statement node for a Describe statement
 */
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
	var funcLit *ast.FuncLit
	var found = false

	for _, node := range desc.Args {
		switch node := node.(type) {
		case *ast.FuncLit:
			found = true
			funcLit = node
			break
		}
	}

	if !found {
		panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
	}

	return funcLit.Body
}

/* convenience function for creating an It("TestNameHere")
 * with all the body of the test function inside the anonymous
 * func passed to It()
 */
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
	blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
	fieldList := &ast.FieldList{}
	funcType := &ast.FuncType{Params: fieldList}
	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}

	testName := rewriteTestName(testFunc.Name.Name)
	basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
	itBlockIdent := &ast.Ident{Name: "It"}
	callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
	return &ast.ExprStmt{X: callExpr}
}

/*
 * rewrites test names to be human readable
 * eg: rewrites "TestSomethingAmazing" as "something amazing"
 */
func rewriteTestName(testName string) string {
	nameComponents := []string{}
	currentString := ""
	indexOfTest := strings.Index(testName, "Test")
	if indexOfTest != 0 {
		return testName
	}

	testName = strings.Replace(testName, "Test", "", 1)
	first, rest := testName[0], testName[1:]
	testName = string(unicode.ToLower(rune(first))) + rest

	for _, rune := range testName {
		if unicode.IsUpper(rune) {
			nameComponents = append(nameComponents, currentString)
			currentString = string(unicode.ToLower(rune))
		} else {
			currentString += string(rune)
		}
	}

	return strings.Join(append(nameComponents, currentString), " ")
}

func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
	return &ast.CallExpr{
		Lparen: ident.NamePos + 1,
		Rparen: ident.NamePos + 2,
		Fun:    &ast.Ident{Name: "GinkgoT"},
	}
}

func newGinkgoTInterface() *ast.Ident {
	return &ast.Ident{Name: "GinkgoTInterface"}
}
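A quick illustration of `rewriteTestName` above. This snippet is illustrative only; it would need to live inside package convert (the function is unexported) and import `fmt`:

```go
func ExampleRewriteTestName() {
	fmt.Println(rewriteTestName("TestSomethingAmazing")) // something amazing
	fmt.Println(rewriteTestName("HelperFunction"))       // unchanged: no leading "Test"
}
```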
@ -1,90 +0,0 @@
package convert

import (
	"fmt"
	"go/ast"
)

/*
 * Given the root node of an AST, returns the node containing the
 * import statements for the file.
 */
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
	for _, declaration := range rootNode.Decls {
		decl, ok := declaration.(*ast.GenDecl)
		if !ok || len(decl.Specs) == 0 {
			continue
		}

		_, ok = decl.Specs[0].(*ast.ImportSpec)
		if ok {
			imports = decl
			return
		}
	}

	err = fmt.Errorf("Could not find imports for root node:\n\t%#v\n", rootNode)
	return
}

/*
 * Removes the "testing" import, if present
 */
func removeTestingImport(rootNode *ast.File) {
	importDecl, err := importsForRootNode(rootNode)
	if err != nil {
		panic(err.Error())
	}

	var index int
	for i, importSpec := range importDecl.Specs {
		importSpec := importSpec.(*ast.ImportSpec)
		if importSpec.Path.Value == "\"testing\"" {
			index = i
			break
		}
	}

	importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
}

/*
 * Adds import statements for onsi/ginkgo, if missing
 */
func addGinkgoImports(rootNode *ast.File) {
	importDecl, err := importsForRootNode(rootNode)
	if err != nil {
		panic(err.Error())
	}

	if len(importDecl.Specs) == 0 {
		// TODO: might need to create an import decl here
		panic("unimplemented : expected to find an imports block")
	}

	needsGinkgo := true
	for _, importSpec := range importDecl.Specs {
		importSpec, ok := importSpec.(*ast.ImportSpec)
		if !ok {
			continue
		}

		if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
			needsGinkgo = false
		}
	}

	if needsGinkgo {
		importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
	}
}

/*
 * convenience function to create an import statement
 */
func createImport(name, path string) *ast.ImportSpec {
	return &ast.ImportSpec{
		Name: &ast.Ident{Name: name},
		Path: &ast.BasicLit{Kind: 9, Value: path},
	}
}
@ -1,128 +0,0 @@
package convert

import (
	"fmt"
	"go/build"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
)

/*
 * RewritePackage takes a name (eg: my-package/tools), finds its test files using
 * Go's build package, and then rewrites them. A ginkgo test suite file will
 * also be added for this package, and all of its child packages.
 */
func RewritePackage(packageName string) {
	pkg, err := packageWithName(packageName)
	if err != nil {
		panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
	}

	for _, filename := range findTestsInPackage(pkg) {
		rewriteTestsInFile(filename)
	}
}

/*
 * Given a package, findTestsInPackage reads the test files in the directory,
 * and then recurses on each child package, returning a slice of all test files
 * found in this process.
 */
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
	for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
		testfile, _ := filepath.Abs(filepath.Join(pkg.Dir, file))
		testfiles = append(testfiles, testfile)
	}

	dirFiles, err := ioutil.ReadDir(pkg.Dir)
	if err != nil {
		panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
	}

	re := regexp.MustCompile(`^[._]`)

	for _, file := range dirFiles {
		if !file.IsDir() {
			continue
		}

		if re.Match([]byte(file.Name())) {
			continue
		}

		packageName := filepath.Join(pkg.ImportPath, file.Name())
		subPackage, err := packageWithName(packageName)
		if err != nil {
			panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
		}

		testfiles = append(testfiles, findTestsInPackage(subPackage)...)
	}

	addGinkgoSuiteForPackage(pkg)
	goFmtPackage(pkg)
	return
}

/*
 * Shells out to `ginkgo bootstrap` to create a test suite file
 */
func addGinkgoSuiteForPackage(pkg *build.Package) {
	originalDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")

	_, err = os.Stat(suite_test_file)
	if err == nil {
		return // test file already exists, this should be a no-op
	}

	err = os.Chdir(pkg.Dir)
	if err != nil {
		panic(err)
	}

	output, err := exec.Command("ginkgo", "bootstrap").Output()

	if err != nil {
		panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
	}

	err = os.Chdir(originalDir)
	if err != nil {
		panic(err)
	}
}

/*
 * Shells out to `go fmt` to format the package
 */
func goFmtPackage(pkg *build.Package) {
	path, _ := filepath.Abs(pkg.ImportPath)
	output, err := exec.Command("go", "fmt", path).CombinedOutput()

	if err != nil {
		fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", path, output, err.Error())
	}
}

/*
 * Attempts to return a package with its test files already read.
 * The ImportMode arg to build.Import lets you specify if you want go to read the
 * buildable go files inside the package, but it fails if the package has no go files.
 */
func packageWithName(name string) (pkg *build.Package, err error) {
	pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
	if err == nil {
		return
	}

	pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
	return
}
@ -1,56 +0,0 @@
package convert

import (
	"go/ast"
	"regexp"
)

/*
 * Given a root node, walks its top level statements and returns
 * pointers to function nodes to rewrite as It statements.
 * These functions, according to Go testing convention, must be named
 * TestWithCamelCasedName and receive a single *testing.T argument.
 */
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
	testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")

	ast.Inspect(rootNode, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		switch node := node.(type) {
		case *ast.FuncDecl:
			matches := testNameRegexp.MatchString(node.Name.Name)

			if matches && receivesTestingT(node) {
				testsToRewrite = append(testsToRewrite, node)
			}
		}

		return true
	})

	return
}

/*
 * convenience function that looks at args to a function and determines if its
 * params include an argument of type *testing.T
 */
func receivesTestingT(node *ast.FuncDecl) bool {
	if len(node.Type.Params.List) != 1 {
		return false
	}

	base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
	if !ok {
		return false
	}

	// use a checked assertion: the pointee may not be a selector
	// expression at all (eg: func TestFoo(f *Foo)), which would
	// otherwise panic here
	intermediate, ok := base.X.(*ast.SelectorExpr)
	if !ok {
		return false
	}

	isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
	isTestingT := intermediate.Sel.Name == "T"

	return isTestingPackage && isTestingT
}
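A minimal sketch of how the finder is used. Since `findTestFuncs` is unexported, the helper below would have to live inside package convert; the function name `listRewriteCandidates` is hypothetical:

```go
package convert

import (
	"fmt"
	"go/parser"
	"go/token"
)

// listRewriteCandidates parses a test file and prints the Test funcs
// the converter would rewrite into It blocks.
func listRewriteCandidates(path string) error {
	fset := token.NewFileSet()
	root, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
	if err != nil {
		return err
	}
	for _, fn := range findTestFuncs(root) {
		fmt.Println(fn.Name.Name) // e.g. TestSomethingAmazing
	}
	return nil
}
```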
@ -1,162 +0,0 @@
package convert

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"io/ioutil"
	"os"
)

/*
 * Given a file path, rewrites any tests into the Ginkgo format.
 * First, we parse the AST and update the imports declaration.
 * Then, we walk the first child elements in the file, returning tests to rewrite.
 * A top level `var _` declaration is created, with a single Describe call inside.
 * Then the test functions to rewrite are inserted as It statements inside the Describe.
 * Finally we walk the rest of the file, replacing other usages of *testing.T.
 * Once that is complete, we write the AST back out again to its file.
 */
func rewriteTestsInFile(pathToFile string) {
	fileSet := token.NewFileSet()
	rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, parser.ParseComments)
	if err != nil {
		panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
	}

	addGinkgoImports(rootNode)
	removeTestingImport(rootNode)

	varUnderscoreBlock := createVarUnderscoreBlock()
	describeBlock := createDescribeBlock()
	varUnderscoreBlock.Values = []ast.Expr{describeBlock}

	for _, testFunc := range findTestFuncs(rootNode) {
		rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
	}

	underscoreDecl := &ast.GenDecl{
		Tok:    85, // gah, magick numbers are needed to make this work
		TokPos: 14, // this tricks Go into writing "var _ = Describe"
		Specs:  []ast.Spec{varUnderscoreBlock},
	}

	imports := rootNode.Decls[0]
	tail := rootNode.Decls[1:]
	rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
	rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
	walkNodesInRootNodeReplacingTestingT(rootNode)

	var buffer bytes.Buffer
	if err = format.Node(&buffer, fileSet, rootNode); err != nil {
		panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
	}

	fileInfo, err := os.Stat(pathToFile)

	if err != nil {
		panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
	}

	err = ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
}

/*
 * Given a test func named TestDoesSomethingNeat, rewrites it as
 * It("does something neat", func() { __test_body_here__ }) and adds it
 * to the Describe's list of statements
 */
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
	var funcIndex int = -1
	for index, child := range rootNode.Decls {
		if child == testFunc {
			funcIndex = index
			break
		}
	}

	if funcIndex < 0 {
		panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
	}

	var block *ast.BlockStmt = blockStatementFromDescribe(describe)
	block.List = append(block.List, createItStatementForTestFunc(testFunc))
	replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))

	// remove the old test func from the root node's declarations
	rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
}

/*
 * walks nodes inside of a test func's statements and replaces the usage of
 * its named *testing.T param with GinkgoT's
 */
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
	ast.Inspect(statementsBlock, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		keyValueExpr, ok := node.(*ast.KeyValueExpr)
		if ok {
			replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
			return true
		}

		funcLiteral, ok := node.(*ast.FuncLit)
		if ok {
			replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
			return true
		}

		callExpr, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		replaceTestingTsInArgsLists(callExpr, testingT)

		funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
		if ok {
			replaceTestingTsMethodCalls(funCall, testingT)
		}

		return true
	})
}

/*
 * rewrites t.Fail() or any other *testing.T method call by replacing it with GinkgoT().Fail().
 * This function receives a selector expression (eg: t.Fail()) and
 * the name of the *testing.T param from the function declaration. Rewrites the
 * selector expression in place if the target was a *testing.T
 */
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
	ident, ok := selectorExpr.X.(*ast.Ident)
	if !ok {
		return
	}

	if ident.Name == testingT {
		selectorExpr.X = newGinkgoTFromIdent(ident)
	}
}

/*
 * replaces usages of a named *testing.T param inside of a call expression
 * with a new GinkgoT object
 */
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
	for index, arg := range callExpr.Args {
		ident, ok := arg.(*ast.Ident)
		if !ok {
			continue
		}

		if ident.Name == testingT {
			callExpr.Args[index] = newGinkgoTFromIdent(ident)
		}
	}
}
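Putting the pieces together, the conversion is easiest to see as a before/after pair. The snippet below is illustrative rather than verbatim converter output, based on createDescribeBlock, rewriteTestName and newGinkgoTFromIdent above:

```go
// Before conversion (XUnit style):
func TestDoesSomethingNeat(t *testing.T) {
	if 1+1 != 2 {
		t.Fail()
	}
}

// After conversion (Ginkgo style):
var _ = Describe("Testing with Ginkgo", func() {
	It("does something neat", func() {
		if 1+1 != 2 {
			GinkgoT().Fail()
		}
	})
})
```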
@ -1,130 +0,0 @@
package convert

import (
	"go/ast"
)

/*
 * Rewrites any other top level funcs that receive a *testing.T param
 */
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
	for _, decl := range declarations {
		decl, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}

		for _, param := range decl.Type.Params.List {
			starExpr, ok := param.Type.(*ast.StarExpr)
			if !ok {
				continue
			}

			selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
			if !ok {
				continue
			}

			xIdent, ok := selectorExpr.X.(*ast.Ident)
			if !ok || xIdent.Name != "testing" {
				continue
			}

			if selectorExpr.Sel.Name != "T" {
				continue
			}

			param.Type = newGinkgoTInterface()
		}
	}
}

/*
 * Walks all of the nodes in the file, replacing *testing.T in struct
 * and func literal nodes. eg:
 *   type foo struct { *testing.T }
 *   var bar = func(t *testing.T) { }
 */
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
	ast.Inspect(rootNode, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		switch node := node.(type) {
		case *ast.StructType:
			replaceTestingTsInStructType(node)
		case *ast.FuncLit:
			replaceTypeDeclTestingTsInFuncLiteral(node)
		}

		return true
	})
}

/*
 * replaces named *testing.T inside a composite literal
 */
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
	ident, ok := kve.Value.(*ast.Ident)
	if !ok {
		return
	}

	if ident.Name == testingT {
		kve.Value = newGinkgoTFromIdent(ident)
	}
}

/*
 * replaces *testing.T params in a func literal with GinkgoT
 */
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
	for _, arg := range functionLiteral.Type.Params.List {
		starExpr, ok := arg.Type.(*ast.StarExpr)
		if !ok {
			continue
		}

		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
		if !ok {
			continue
		}

		target, ok := selectorExpr.X.(*ast.Ident)
		if !ok {
			continue
		}

		if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
			arg.Type = newGinkgoTInterface()
		}
	}
}

/*
 * Replaces *testing.T types inside of a struct declaration with a GinkgoT
 * eg: type foo struct { *testing.T }
 */
func replaceTestingTsInStructType(structType *ast.StructType) {
	for _, field := range structType.Fields.List {
		starExpr, ok := field.Type.(*ast.StarExpr)
		if !ok {
			continue
		}

		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
		if !ok {
			continue
		}

		xIdent, ok := selectorExpr.X.(*ast.Ident)
		if !ok {
			continue
		}

		if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
			field.Type = newGinkgoTInterface()
		}
	}
}
@ -1,51 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/onsi/ginkgo/ginkgo/convert"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	"github.com/onsi/ginkgo/types"
)

func BuildConvertCommand() *Command {
	return &Command{
		Name:         "convert",
		FlagSet:      flag.NewFlagSet("convert", flag.ExitOnError),
		UsageCommand: "ginkgo convert /path/to/package",
		Usage: []string{
			"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
		},
		Command: convertPackage,
	}
}

func convertPackage(args []string, additionalArgs []string) {
	deprecationTracker := types.NewDeprecationTracker()
	deprecationTracker.TrackDeprecation(types.Deprecations.Convert())
	fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())

	if len(args) != 1 {
		println("usage: ginkgo convert /path/to/your/package")
		os.Exit(1)
	}

	defer func() {
		err := recover()
		if err != nil {
			switch err := err.(type) {
			case error:
				println(err.Error())
			case string:
				println(err)
			default:
				println(fmt.Sprintf("unexpected error: %#v", err))
			}
			os.Exit(1)
		}
	}()

	convert.RewritePackage(args[0])
}
@ -1,33 +0,0 @@
package main

import (
	"flag"
	"fmt"
)

func BuildHelpCommand() *Command {
	return &Command{
		Name:         "help",
		FlagSet:      flag.NewFlagSet("help", flag.ExitOnError),
		UsageCommand: "ginkgo help <COMMAND>",
		Usage: []string{
			"Print usage information. If a command is passed in, print usage information just for that command.",
		},
		Command: printHelp,
	}
}

func printHelp(args []string, additionalArgs []string) {
	if len(args) == 0 {
		usage()
		emitRCAdvertisement()
	} else {
		command, found := commandMatching(args[0])
		if !found {
			complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
		}

		usageForCommand(command, true)
		emitRCAdvertisement()
	}
}
@ -1,52 +0,0 @@
package interrupthandler

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
)

type InterruptHandler struct {
	interruptCount int
	lock           *sync.Mutex
	C              chan bool
}

func NewInterruptHandler() *InterruptHandler {
	h := &InterruptHandler{
		lock: &sync.Mutex{},
		C:    make(chan bool),
	}

	go h.handleInterrupt()
	SwallowSigQuit()

	return h
}

func (h *InterruptHandler) WasInterrupted() bool {
	h.lock.Lock()
	defer h.lock.Unlock()

	return h.interruptCount > 0
}

func (h *InterruptHandler) handleInterrupt() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	signal.Stop(c)

	h.lock.Lock()
	h.interruptCount++
	if h.interruptCount == 1 {
		close(h.C)
	} else if h.interruptCount > 5 {
		os.Exit(1)
	}
	h.lock.Unlock()

	go h.handleInterrupt()
}

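Note on the deleted handler above: it uses the close-to-broadcast idiom. The first SIGINT/SIGTERM closes `C`, so every goroutine selecting on it unblocks at once, while more than five repeated interrupts force an exit. A minimal consumer sketch, assuming the `interrupthandler` package above were still importable (hypothetical usage, not part of this change):

// Hypothetical consumer of the InterruptHandler above.
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
)

func main() {
	h := interrupthandler.NewInterruptHandler()

	done := make(chan struct{})
	go func() {
		// long-running work would go here
		close(done)
	}()

	select {
	case <-h.C: // closed on the first SIGINT/SIGTERM
		fmt.Println("interrupted; cleaning up")
	case <-done:
		fmt.Println("finished")
	}
}
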
@ -1,337 +0,0 @@
/*
The Ginkgo CLI

The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)

You can also learn more by running:

	ginkgo help

Here are some of the more commonly used commands:

To install:

	go install github.com/onsi/ginkgo/ginkgo

To run tests:

	ginkgo

To run tests in all subdirectories:

	ginkgo -r

To run tests in particular packages:

	ginkgo <flags> /path/to/package /path/to/another/package

To pass arguments/flags to your tests:

	ginkgo <flags> <packages> -- <pass-throughs>

To run tests in parallel

	ginkgo -p

this will automatically detect the optimal number of nodes to use.  Alternatively, you can specify the number of nodes with:

	ginkgo -nodes=N

(note that you don't need to provide -p in this case).

By default the Ginkgo CLI will spin up a server that the individual test processes send test output to.  The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
An alternative is to have the parallel nodes run and stream interleaved output back.  This is useful for debugging, particularly in contexts where tests hang/fail to start.  To get this interleaved output:

	ginkgo -nodes=N -stream=true

On Windows, the default value for stream is true.

By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails.  To have Ginkgo run subsequent test suites instead you can:

	ginkgo -keepGoing

To fail if there are ginkgo tests in a directory but no test suite (missing `RunSpecs`)

	ginkgo -requireSuite

To monitor packages and rerun tests when changes occur:

	ginkgo watch <-r> </path/to/package>

passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
that depend on X are not rerun.

[OSX & Linux only] To receive (desktop) notifications when a test run completes:

	ginkgo -notify

this is particularly useful with `ginkgo watch`.  Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`

Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails.  You can do this with:

	ginkgo -untilItFails

To bootstrap a test suite:

	ginkgo bootstrap

To generate a test file:

	ginkgo generate <test_file_name>

To bootstrap/generate test files without using "." imports:

	ginkgo bootstrap --nodot
	ginkgo generate --nodot

this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions.  When you pull to the latest Ginkgo/Gomega you'll want to run

	ginkgo nodot

to refresh this list and pull in any new identifiers.  In particular, this will pull in any new Gomega matchers that get added.

To convert an existing XUnit style test suite to a Ginkgo-style test suite:

	ginkgo convert .

To unfocus tests:

	ginkgo unfocus

or

	ginkgo blur

To compile a test suite:

	ginkgo build <path-to-package>

will output an executable file named `package.test`.  This can be run directly or by invoking

	ginkgo <path-to-package.test>


To print an outline of Ginkgo specs and containers in a file:

	ginkgo outline <filename>

To print out Ginkgo's version:

	ginkgo version

To get more help:

	ginkgo help
*/
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/formatter"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
)

const greenColor = "\x1b[32m"
const redColor = "\x1b[91m"
const defaultStyle = "\x1b[0m"
const lightGrayColor = "\x1b[37m"

type Command struct {
	Name                      string
	AltName                   string
	FlagSet                   *flag.FlagSet
	Usage                     []string
	UsageCommand              string
	Command                   func(args []string, additionalArgs []string)
	SuppressFlagDocumentation bool
	FlagDocSubstitute         []string
}

func (c *Command) Matches(name string) bool {
	return c.Name == name || (c.AltName != "" && c.AltName == name)
}

func (c *Command) Run(args []string, additionalArgs []string) {
	c.FlagSet.Usage = usage
	c.FlagSet.Parse(args)
	c.Command(c.FlagSet.Args(), additionalArgs)
}

var DefaultCommand *Command
var Commands []*Command

func init() {
	DefaultCommand = BuildRunCommand()
	Commands = append(Commands, BuildWatchCommand())
	Commands = append(Commands, BuildBuildCommand())
	Commands = append(Commands, BuildBootstrapCommand())
	Commands = append(Commands, BuildGenerateCommand())
	Commands = append(Commands, BuildNodotCommand())
	Commands = append(Commands, BuildConvertCommand())
	Commands = append(Commands, BuildUnfocusCommand())
	Commands = append(Commands, BuildVersionCommand())
	Commands = append(Commands, BuildHelpCommand())
	Commands = append(Commands, BuildOutlineCommand())
}

func main() {
	args := []string{}
	additionalArgs := []string{}

	foundDelimiter := false

	for _, arg := range os.Args[1:] {
		if !foundDelimiter {
			if arg == "--" {
				foundDelimiter = true
				continue
			}
		}

		if foundDelimiter {
			additionalArgs = append(additionalArgs, arg)
		} else {
			args = append(args, arg)
		}
	}

	if len(args) > 0 {
		commandToRun, found := commandMatching(args[0])
		if found {
			commandToRun.Run(args[1:], additionalArgs)
			return
		}
	}

	DefaultCommand.Run(args, additionalArgs)
}

func commandMatching(name string) (*Command, bool) {
	for _, command := range Commands {
		if command.Matches(name) {
			return command, true
		}
	}
	return nil, false
}

func usage() {
	fmt.Printf("Ginkgo Version %s\n\n", config.VERSION)
	usageForCommand(DefaultCommand, false)
	for _, command := range Commands {
		fmt.Printf("\n")
		usageForCommand(command, false)
	}
}

func usageForCommand(command *Command, longForm bool) {
	fmt.Printf("%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
	fmt.Printf("%s\n", strings.Join(command.Usage, "\n"))
	if command.SuppressFlagDocumentation && !longForm {
		fmt.Printf("%s\n", strings.Join(command.FlagDocSubstitute, "\n  "))
	} else {
		command.FlagSet.SetOutput(os.Stdout)
		command.FlagSet.PrintDefaults()
	}
}

func complainAndQuit(complaint string) {
	fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
	emitRCAdvertisement()
	os.Exit(1)
}

func findSuites(args []string, recurseForAll bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
	suites := []testsuite.TestSuite{}

	if len(args) > 0 {
		for _, arg := range args {
			if allowPrecompiled {
				suite, err := testsuite.PrecompiledTestSuite(arg)
				if err == nil {
					suites = append(suites, suite)
					continue
				}
			}
			recurseForSuite := recurseForAll
			if strings.HasSuffix(arg, "/...") && arg != "/..." {
				arg = arg[:len(arg)-4]
				recurseForSuite = true
			}
			suites = append(suites, testsuite.SuitesInDir(arg, recurseForSuite)...)
		}
	} else {
		suites = testsuite.SuitesInDir(".", recurseForAll)
	}

	skippedPackages := []string{}
	if skipPackage != "" {
		skipFilters := strings.Split(skipPackage, ",")
		filteredSuites := []testsuite.TestSuite{}
		for _, suite := range suites {
			skip := false
			for _, skipFilter := range skipFilters {
				if strings.Contains(suite.Path, skipFilter) {
					skip = true
					break
				}
			}
			if skip {
				skippedPackages = append(skippedPackages, suite.Path)
			} else {
				filteredSuites = append(filteredSuites, suite)
			}
		}
		suites = filteredSuites
	}

	return suites, skippedPackages
}

func goFmt(path string) {
	out, err := exec.Command("go", "fmt", path).CombinedOutput()
	if err != nil {
		complainAndQuit("Could not fmt: " + err.Error() + "\n" + string(out))
	}
}

func pluralizedWord(singular, plural string, count int) string {
	if count == 1 {
		return singular
	}
	return plural
}

func emitRCAdvertisement() {
	ackRC := os.Getenv("ACK_GINKGO_RC")
	if ackRC != "" {
		return
	}
	home, err := os.UserHomeDir()
	if err == nil {
		_, err := os.Stat(filepath.Join(home, ".ack-ginkgo-rc"))
		if err == nil {
			return
		}
	}

	out := formatter.F("\n{{light-yellow}}Ginkgo 2.0 is coming soon!{{/}}\n")
	out += formatter.F("{{light-yellow}}=========================={{/}}\n")
	out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
	out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021.  {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
	out += formatter.F("  - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
	out += formatter.F("  - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
	out += formatter.F("  - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
	out += formatter.F("To {{bold}}{{coral}}silence this notice{{/}}, set the environment variable: {{bold}}ACK_GINKGO_RC=true{{/}}\n")
	out += formatter.F("Alternatively you can: {{bold}}touch $HOME/.ack-ginkgo-rc{{/}}")

	fmt.Println(out)
}

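The argument loop in `main` above splits `os.Args` at the first `--`: everything before it is interpreted by the CLI, everything after is forwarded verbatim to the test binary. A standalone sketch of that split (illustrative only):

// Sketch of the "--" split performed in main() above.
package main

import "fmt"

func splitArgs(argv []string) (args, passThrough []string) {
	for i, arg := range argv {
		if arg == "--" {
			return argv[:i], argv[i+1:]
		}
	}
	return argv, nil
}

func main() {
	args, passThrough := splitArgs([]string{"-r", "-nodes=4", "--", "-customFlag=3"})
	fmt.Println(args)        // [-r -nodes=4]
	fmt.Println(passThrough) // [-customFlag=3]
}
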
@ -1,196 +0,0 @@
package nodot

import (
	"fmt"
	"go/ast"
	"go/build"
	"go/parser"
	"go/token"
	"path/filepath"
	"strings"
)

func ApplyNoDot(data []byte) ([]byte, error) {
	sections, err := generateNodotSections()
	if err != nil {
		return nil, err
	}

	for _, section := range sections {
		data = section.createOrUpdateIn(data)
	}

	return data, nil
}

type nodotSection struct {
	name         string
	pkg          string
	declarations []string
	types        []string
}

func (s nodotSection) createOrUpdateIn(data []byte) []byte {
	renames := map[string]string{}

	contents := string(data)

	lines := strings.Split(contents, "\n")

	comment := "// Declarations for " + s.name

	newLines := []string{}
	for _, line := range lines {
		if line == comment {
			continue
		}

		words := strings.Split(line, " ")
		lastWord := words[len(words)-1]

		if s.containsDeclarationOrType(lastWord) {
			renames[lastWord] = words[1]
			continue
		}

		newLines = append(newLines, line)
	}

	if len(newLines[len(newLines)-1]) > 0 {
		newLines = append(newLines, "")
	}

	newLines = append(newLines, comment)

	for _, typ := range s.types {
		name, ok := renames[s.prefix(typ)]
		if !ok {
			name = typ
		}
		newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
	}

	for _, decl := range s.declarations {
		name, ok := renames[s.prefix(decl)]
		if !ok {
			name = decl
		}
		newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
	}

	newLines = append(newLines, "")

	newContents := strings.Join(newLines, "\n")

	return []byte(newContents)
}

func (s nodotSection) prefix(declOrType string) string {
	return s.pkg + "." + declOrType
}

func (s nodotSection) containsDeclarationOrType(word string) bool {
	for _, declaration := range s.declarations {
		if s.prefix(declaration) == word {
			return true
		}
	}

	for _, typ := range s.types {
		if s.prefix(typ) == word {
			return true
		}
	}

	return false
}

func generateNodotSections() ([]nodotSection, error) {
	sections := []nodotSection{}

	declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
	if err != nil {
		return nil, err
	}
	sections = append(sections, nodotSection{
		name:         "Ginkgo DSL",
		pkg:          "ginkgo",
		declarations: declarations,
		types:        []string{"Done", "Benchmarker"},
	})

	declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
	if err != nil {
		return nil, err
	}
	sections = append(sections, nodotSection{
		name:         "Gomega DSL",
		pkg:          "gomega",
		declarations: declarations,
	})

	declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
	if err != nil {
		return nil, err
	}
	sections = append(sections, nodotSection{
		name:         "Gomega Matchers",
		pkg:          "gomega",
		declarations: declarations,
	})

	return sections, nil
}

func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
	pkg, err := build.Import(pkgPath, ".", 0)
	if err != nil {
		return []string{}, err
	}

	declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
	if err != nil {
		return []string{}, err
	}

	blacklistLookup := map[string]bool{}
	for _, declaration := range blacklist {
		blacklistLookup[declaration] = true
	}

	filteredDeclarations := []string{}
	for _, declaration := range declarations {
		if blacklistLookup[declaration] {
			continue
		}
		filteredDeclarations = append(filteredDeclarations, declaration)
	}

	return filteredDeclarations, nil
}

func getExportedDeclarationsForFile(path string) ([]string, error) {
	fset := token.NewFileSet()
	tree, err := parser.ParseFile(fset, path, nil, 0)
	if err != nil {
		return []string{}, err
	}

	declarations := []string{}
	ast.FileExports(tree)
	for _, decl := range tree.Decls {
		switch x := decl.(type) {
		case *ast.GenDecl:
			switch s := x.Specs[0].(type) {
			case *ast.ValueSpec:
				declarations = append(declarations, s.Names[0].Name)
			}
		case *ast.FuncDecl:
			if x.Recv == nil {
				declarations = append(declarations, x.Name.Name)
			}
		}
	}

	return declarations, nil
}

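`createOrUpdateIn` above leans on a simple line convention: in a generated declaration such as `var MyIt = ginkgo.It`, the last space-separated word is the qualified name and `words[1]` is the local identifier, which is how user renames survive regeneration. A standalone sketch of that rule (hypothetical names):

// Sketch of the rename-preservation rule in createOrUpdateIn above.
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "var MyIt = ginkgo.It"
	words := strings.Split(line, " ")
	qualified := words[len(words)-1] // "ginkgo.It"
	local := words[1]                // "MyIt" - honored on regeneration
	fmt.Printf("%s stays bound to %s\n", local, qualified)
}
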
@ -1,77 +0,0 @@
package main

import (
	"bufio"
	"flag"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"

	"github.com/onsi/ginkgo/ginkgo/nodot"
)

func BuildNodotCommand() *Command {
	return &Command{
		Name:         "nodot",
		FlagSet:      flag.NewFlagSet("bootstrap", flag.ExitOnError),
		UsageCommand: "ginkgo nodot",
		Usage: []string{
			"Update the nodot declarations in your test suite",
			"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
			"If you've renamed a declaration, that name will be honored and not overwritten.",
		},
		Command: updateNodot,
	}
}

func updateNodot(args []string, additionalArgs []string) {
	suiteFile, perm := findSuiteFile()

	data, err := ioutil.ReadFile(suiteFile)
	if err != nil {
		complainAndQuit("Failed to update nodot declarations: " + err.Error())
	}

	content, err := nodot.ApplyNoDot(data)
	if err != nil {
		complainAndQuit("Failed to update nodot declarations: " + err.Error())
	}
	ioutil.WriteFile(suiteFile, content, perm)

	goFmt(suiteFile)
}

func findSuiteFile() (string, os.FileMode) {
	workingDir, err := os.Getwd()
	if err != nil {
		complainAndQuit("Could not find suite file for nodot: " + err.Error())
	}

	files, err := ioutil.ReadDir(workingDir)
	if err != nil {
		complainAndQuit("Could not find suite file for nodot: " + err.Error())
	}

	re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)

	for _, file := range files {
		if file.IsDir() {
			continue
		}
		path := filepath.Join(workingDir, file.Name())
		f, err := os.Open(path)
		if err != nil {
			complainAndQuit("Could not find suite file for nodot: " + err.Error())
		}
		defer f.Close()

		if re.MatchReader(bufio.NewReader(f)) {
			return path, file.Mode()
		}
	}

	complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that calls Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")

	return "", 0
}

@ -1,141 +0,0 @@
package main

import (
	"fmt"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"strings"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
)

type Notifier struct {
	commandFlags *RunWatchAndBuildCommandFlags
}

func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
	return &Notifier{
		commandFlags: commandFlags,
	}
}

func (n *Notifier) VerifyNotificationsAreAvailable() {
	if n.commandFlags.Notify {
		onLinux := (runtime.GOOS == "linux")
		onOSX := (runtime.GOOS == "darwin")
		if onOSX {

			_, err := exec.LookPath("terminal-notifier")
			if err != nil {
				fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.

OSX:

To remedy this:

    brew install terminal-notifier

To learn more about terminal-notifier:

    https://github.com/alloy/terminal-notifier
`)
				os.Exit(1)
			}

		} else if onLinux {

			_, err := exec.LookPath("notify-send")
			if err != nil {
				fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.

Linux:

Download and install notify-send for your distribution
`)
				os.Exit(1)
			}

		}
	}
}

func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
	if suitePassed {
		n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
	} else {
		n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
	}
}

func (n *Notifier) SendNotification(title string, subtitle string) {

	if n.commandFlags.Notify {
		onLinux := (runtime.GOOS == "linux")
		onOSX := (runtime.GOOS == "darwin")

		if onOSX {

			_, err := exec.LookPath("terminal-notifier")
			if err == nil {
				args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
				terminal := os.Getenv("TERM_PROGRAM")
				if terminal == "iTerm.app" {
					args = append(args, "-activate", "com.googlecode.iterm2")
				} else if terminal == "Apple_Terminal" {
					args = append(args, "-activate", "com.apple.Terminal")
				}

				exec.Command("terminal-notifier", args...).Run()
			}

		} else if onLinux {

			_, err := exec.LookPath("notify-send")
			if err == nil {
				args := []string{"-a", "ginkgo", title, subtitle}
				exec.Command("notify-send", args...).Run()
			}

		}
	}
}

func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {

	command := n.commandFlags.AfterSuiteHook
	if command != "" {

		// Allow for string replacement to pass input to the command
		passed := "[FAIL]"
		if suitePassed {
			passed = "[PASS]"
		}
		command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
		command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)

		// Must break command into parts
		splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
		parts := splitArgs.FindAllString(command, -1)

		output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
		if err != nil {
			fmt.Println("Post-suite command failed:")
			if config.DefaultReporterConfig.NoColor {
				fmt.Printf("\t%s\n", output)
			} else {
				fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
			}
			n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
		} else {
			fmt.Println("Post-suite command succeeded:")
			if config.DefaultReporterConfig.NoColor {
				fmt.Printf("\t%s\n", output)
			} else {
				fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
			}
		}
	}
}

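`RunCommand` above substitutes the `(ginkgo-suite-passed)` and `(ginkgo-suite-name)` placeholders before splitting the hook command with a quote-aware regexp. A standalone sketch of those two steps, with made-up suite values:

// Sketch of the -afterSuiteHook placeholder substitution and argument
// splitting used in RunCommand above.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	command := `notify-send "(ginkgo-suite-name)" (ginkgo-suite-passed)`
	command = strings.Replace(command, "(ginkgo-suite-passed)", "[PASS]", -1)
	command = strings.Replace(command, "(ginkgo-suite-name)", "books", -1)

	splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
	fmt.Println(splitArgs.FindAllString(command, -1))
	// Output: [notify-send "books" [PASS]]
}
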
@ -1,95 +0,0 @@
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"go/parser"
	"go/token"
	"os"

	"github.com/onsi/ginkgo/ginkgo/outline"
)

const (
	// indentWidth is the width used by the 'indent' output
	indentWidth = 4
	// stdinAlias is a portable alias for stdin. This convention is used in
	// other CLIs, e.g., kubectl.
	stdinAlias   = "-"
	usageCommand = "ginkgo outline <filename>"
)

func BuildOutlineCommand() *Command {
	const defaultFormat = "csv"
	var format string
	flagSet := flag.NewFlagSet("outline", flag.ExitOnError)
	flagSet.StringVar(&format, "format", defaultFormat, "Format of outline. Accepted: 'csv', 'indent', 'json'")
	return &Command{
		Name:         "outline",
		FlagSet:      flagSet,
		UsageCommand: usageCommand,
		Usage: []string{
			"Create an outline of Ginkgo symbols for a file",
			"To read from stdin, use: `ginkgo outline -`",
			"Accepts the following flags:",
		},
		Command: func(args []string, additionalArgs []string) {
			outlineFile(args, format)
		},
	}
}

func outlineFile(args []string, format string) {
	if len(args) != 1 {
		println(fmt.Sprintf("usage: %s", usageCommand))
		os.Exit(1)
	}

	filename := args[0]
	var src *os.File
	if filename == stdinAlias {
		src = os.Stdin
	} else {
		var err error
		src, err = os.Open(filename)
		if err != nil {
			println(fmt.Sprintf("error opening file: %s", err))
			os.Exit(1)
		}
	}

	fset := token.NewFileSet()

	parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
	if err != nil {
		println(fmt.Sprintf("error parsing source: %s", err))
		os.Exit(1)
	}

	o, err := outline.FromASTFile(fset, parsedSrc)
	if err != nil {
		println(fmt.Sprintf("error creating outline: %s", err))
		os.Exit(1)
	}

	var oerr error
	switch format {
	case "csv":
		_, oerr = fmt.Print(o)
	case "indent":
		_, oerr = fmt.Print(o.StringIndent(indentWidth))
	case "json":
		b, err := json.Marshal(o)
		if err != nil {
			println(fmt.Sprintf("error marshalling to json: %s", err))
		}
		_, oerr = fmt.Println(string(b))
	default:
		complainAndQuit(fmt.Sprintf("format %s not accepted", format))
	}
	if oerr != nil {
		println(fmt.Sprintf("error writing outline: %s", oerr))
		os.Exit(1)
	}
}

@ -1,316 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"math/rand"
	"os"
	"regexp"
	"runtime"
	"strings"
	"time"

	"io/ioutil"
	"path/filepath"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
	"github.com/onsi/ginkgo/ginkgo/testrunner"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	"github.com/onsi/ginkgo/types"
)

func BuildRunCommand() *Command {
	commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
	notifier := NewNotifier(commandFlags)
	interruptHandler := interrupthandler.NewInterruptHandler()
	runner := &SpecRunner{
		commandFlags:     commandFlags,
		notifier:         notifier,
		interruptHandler: interruptHandler,
		suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
	}

	return &Command{
		Name:         "",
		FlagSet:      commandFlags.FlagSet,
		UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
		Usage: []string{
			"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
			"Any arguments after -- will be passed to the test.",
			"Accepts the following flags:",
		},
		Command: runner.RunSpecs,
	}
}

type SpecRunner struct {
	commandFlags     *RunWatchAndBuildCommandFlags
	notifier         *Notifier
	interruptHandler *interrupthandler.InterruptHandler
	suiteRunner      *SuiteRunner
}

func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
	r.commandFlags.computeNodes()
	r.notifier.VerifyNotificationsAreAvailable()

	deprecationTracker := types.NewDeprecationTracker()

	if r.commandFlags.ParallelStream && (runtime.GOOS != "windows") {
		deprecationTracker.TrackDeprecation(types.Deprecation{
			Message: "--stream is deprecated and will be removed in Ginkgo 2.0",
			DocLink: "removed--stream",
			Version: "1.16.0",
		})
	}

	if r.commandFlags.Notify {
		deprecationTracker.TrackDeprecation(types.Deprecation{
			Message: "--notify is deprecated and will be removed in Ginkgo 2.0",
			DocLink: "removed--notify",
			Version: "1.16.0",
		})
	}

	if deprecationTracker.DidTrackDeprecations() {
		fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
	}

	suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
	if len(skippedPackages) > 0 {
		fmt.Println("Will skip:")
		for _, skippedPackage := range skippedPackages {
			fmt.Println("  " + skippedPackage)
		}
	}

	if len(skippedPackages) > 0 && len(suites) == 0 {
		fmt.Println("All tests skipped! Exiting...")
		os.Exit(0)
	}

	if len(suites) == 0 {
		complainAndQuit("Found no test suites")
	}

	r.ComputeSuccinctMode(len(suites))

	t := time.Now()

	runners := []*testrunner.TestRunner{}
	for _, suite := range suites {
		runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs))
	}

	numSuites := 0
	runResult := testrunner.PassingRunResult()
	if r.commandFlags.UntilItFails {
		iteration := 0
		for {
			r.UpdateSeed()
			randomizedRunners := r.randomizeOrder(runners)
			runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
			iteration++

			if r.interruptHandler.WasInterrupted() {
				break
			}

			if runResult.Passed {
				fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
			} else {
				fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
				break
			}
		}
	} else {
		randomizedRunners := r.randomizeOrder(runners)
		runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
	}

	for _, runner := range runners {
		runner.CleanUp()
	}

	if r.isInCoverageMode() {
		if r.getOutputDir() != "" {
			// If coverprofile is set, combine coverages
			if r.getCoverprofile() != "" {
				if err := r.combineCoverprofiles(runners); err != nil {
					fmt.Println(err.Error())
					os.Exit(1)
				}
			} else {
				// Just move them
				r.moveCoverprofiles(runners)
			}
		}
	}

	fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))

	if runResult.Passed {
		if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
			fmt.Printf("Test Suite Passed\n")
			fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
			os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
		} else {
			fmt.Printf("Test Suite Passed\n")
			os.Exit(0)
		}
	} else {
		fmt.Printf("Test Suite Failed\n")
		emitRCAdvertisement()
		os.Exit(1)
	}
}

// Moves all generated profiles to specified directory
func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) {
	for _, runner := range runners {
		_, filename := filepath.Split(runner.CoverageFile)
		err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename))

		if err != nil {
			fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err)
			return
		}
	}
}

// Combines all generated profiles in the specified directory
func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error {

	path, _ := filepath.Abs(r.getOutputDir())
	if !fileExists(path) {
		return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir())
	}

	fmt.Println("path is " + path)

	combined, err := os.OpenFile(
		filepath.Join(path, r.getCoverprofile()),
		os.O_WRONLY|os.O_CREATE,
		0666,
	)

	if err != nil {
		fmt.Printf("Unable to create combined profile, %v\n", err)
		return nil // non-fatal error
	}

	modeRegex := regexp.MustCompile(`^mode: .*\n`)
	for index, runner := range runners {
		contents, err := ioutil.ReadFile(runner.CoverageFile)

		if err != nil {
			fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
			return nil // non-fatal error
		}

		// remove the cover mode line from every file
		// except the first one
		if index > 0 {
			contents = modeRegex.ReplaceAll(contents, []byte{})
		}

		_, err = combined.Write(contents)

		// Add a newline to the end of every file if missing.
		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
			_, err = combined.Write([]byte("\n"))
		}

		if err != nil {
			fmt.Printf("Unable to append to coverprofile, %v\n", err)
			return nil // non-fatal error
		}
	}

	fmt.Println("All profiles combined")
	return nil
}

func (r *SpecRunner) isInCoverageMode() bool {
	opts := r.commandFlags.GoOpts
	return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != ""
}

func (r *SpecRunner) getCoverprofile() string {
	return *r.commandFlags.GoOpts["coverprofile"].(*string)
}

func (r *SpecRunner) getOutputDir() string {
	return *r.commandFlags.GoOpts["outputdir"].(*string)
}

func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
	if config.DefaultReporterConfig.Verbose {
		config.DefaultReporterConfig.Succinct = false
		return
	}

	if numSuites == 1 {
		return
	}

	if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
		config.DefaultReporterConfig.Succinct = true
	}
}

func (r *SpecRunner) UpdateSeed() {
	if !r.commandFlags.wasSet("seed") {
		config.GinkgoConfig.RandomSeed = time.Now().Unix()
	}
}

func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
	if !r.commandFlags.RandomizeSuites {
		return runners
	}

	if len(runners) <= 1 {
		return runners
	}

	randomizedRunners := make([]*testrunner.TestRunner, len(runners))
	randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
	permutation := randomizer.Perm(len(runners))
	for i, j := range permutation {
		randomizedRunners[i] = runners[j]
	}
	return randomizedRunners
}

func orcMessage(iteration int) string {
	if iteration < 10 {
		return ""
	} else if iteration < 30 {
		return []string{
			"If at first you succeed...",
			"...try, try again.",
			"Looking good!",
			"Still good...",
			"I think your tests are fine....",
			"Yep, still passing",
			"Oh boy, here I go testin' again!",
			"Even the gophers are getting bored",
			"Did you try -race?",
			"Maybe you should stop now?",
			"I'm getting tired...",
			"What if I just made you a sandwich?",
			"Hit ^C, hit ^C, please hit ^C",
			"Make it stop. Please!",
			"Come on! Enough is enough!",
			"Dave, this conversation can serve no purpose anymore. Goodbye.",
			"Just what do you think you're doing, Dave? ",
			"I, Sisyphus",
			"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
			"I guess Einstein never tried to churn butter",
		}[iteration-10] + "\n"
	} else {
		return "No, seriously... you can probably stop now.\n"
	}
}

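`combineCoverprofiles` above works because a Go cover profile is line-oriented with a single `mode:` header; stripping that header from every profile but the first yields one valid merged profile. A standalone sketch with made-up profile contents:

// Sketch of the mode-line stripping used when merging cover profiles above.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	first := []byte("mode: atomic\npkg/a.go:1.1,2.2 1 1\n")
	second := []byte("mode: atomic\npkg/b.go:3.3,4.4 1 0\n")

	modeRegex := regexp.MustCompile(`^mode: .*\n`)
	combined := append(first, modeRegex.ReplaceAll(second, []byte{})...)
	fmt.Print(string(combined))
	// mode: atomic
	// pkg/a.go:1.1,2.2 1 1
	// pkg/b.go:3.3,4.4 1 0
}
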
@ -1,169 +0,0 @@
package main

import (
	"flag"
	"runtime"

	"time"

	"github.com/onsi/ginkgo/config"
)

type RunWatchAndBuildCommandFlags struct {
	Recurse     bool
	SkipPackage string
	GoOpts      map[string]interface{}

	//for run and watch commands
	NumCPU         int
	NumCompilers   int
	ParallelStream bool
	Notify         bool
	AfterSuiteHook string
	AutoNodes      bool
	Timeout        time.Duration

	//only for run command
	KeepGoing       bool
	UntilItFails    bool
	RandomizeSuites bool

	//only for watch command
	Depth       int
	WatchRegExp string

	FlagSet *flag.FlagSet
}

const runMode = 1
const watchMode = 2
const buildMode = 3

func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
	c := &RunWatchAndBuildCommandFlags{
		FlagSet: flagSet,
	}
	c.flags(runMode)
	return c
}

func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
	c := &RunWatchAndBuildCommandFlags{
		FlagSet: flagSet,
	}
	c.flags(watchMode)
	return c
}

func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
	c := &RunWatchAndBuildCommandFlags{
		FlagSet: flagSet,
	}
	c.flags(buildMode)
	return c
}

func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
	wasSet := false
	c.FlagSet.Visit(func(f *flag.Flag) {
		if f.Name == flagName {
			wasSet = true
		}
	})

	return wasSet
}

func (c *RunWatchAndBuildCommandFlags) computeNodes() {
	if c.wasSet("nodes") {
		return
	}
	if c.AutoNodes {
		switch n := runtime.NumCPU(); {
		case n <= 4:
			c.NumCPU = n
		default:
			c.NumCPU = n - 1
		}
	}
}

func (c *RunWatchAndBuildCommandFlags) stringSlot(slot string) *string {
	var opt string
	c.GoOpts[slot] = &opt
	return &opt
}

func (c *RunWatchAndBuildCommandFlags) boolSlot(slot string) *bool {
	var opt bool
	c.GoOpts[slot] = &opt
	return &opt
}

func (c *RunWatchAndBuildCommandFlags) intSlot(slot string) *int {
	var opt int
	c.GoOpts[slot] = &opt
	return &opt
}

func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
	c.GoOpts = make(map[string]interface{})

	onWindows := (runtime.GOOS == "windows")

	c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively.")
	c.FlagSet.BoolVar(c.boolSlot("race"), "race", false, "Run tests with race detection enabled.")
	c.FlagSet.BoolVar(c.boolSlot("cover"), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory.")
	c.FlagSet.StringVar(c.stringSlot("coverpkg"), "coverpkg", "", "Run tests with coverage on the given external modules.")
	c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
	c.FlagSet.StringVar(c.stringSlot("tags"), "tags", "", "A list of build tags to consider satisfied during the build.")
	c.FlagSet.StringVar(c.stringSlot("gcflags"), "gcflags", "", "Arguments to pass on each go tool compile invocation.")
	c.FlagSet.StringVar(c.stringSlot("covermode"), "covermode", "", "Set the mode for coverage analysis.")
	c.FlagSet.BoolVar(c.boolSlot("a"), "a", false, "Force rebuilding of packages that are already up-to-date.")
	c.FlagSet.BoolVar(c.boolSlot("n"), "n", false, "Have `go test` print the commands but do not run them.")
	c.FlagSet.BoolVar(c.boolSlot("msan"), "msan", false, "Enable interoperation with memory sanitizer.")
	c.FlagSet.BoolVar(c.boolSlot("x"), "x", false, "Have `go test` print the commands.")
	c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.")
	c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.")
	c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.")
	c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.")
	c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).")
	c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.")
	c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.")
	c.FlagSet.StringVar(c.stringSlot("ldflags"), "ldflags", "", "Arguments to pass on each go tool link invocation.")
	c.FlagSet.BoolVar(c.boolSlot("linkshared"), "linkshared", false, "Link against shared libraries previously created with -buildmode=shared.")
	c.FlagSet.StringVar(c.stringSlot("pkgdir"), "pkgdir", "", "install and load all packages from the given dir instead of the usual locations.")
	c.FlagSet.StringVar(c.stringSlot("toolexec"), "toolexec", "", "a program to use to invoke toolchain programs like vet and asm.")
	c.FlagSet.IntVar(c.intSlot("blockprofilerate"), "blockprofilerate", 1, "Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with the given value.")
	c.FlagSet.StringVar(c.stringSlot("coverprofile"), "coverprofile", "", "Write a coverage profile to the specified file after all tests have passed.")
	c.FlagSet.StringVar(c.stringSlot("cpuprofile"), "cpuprofile", "", "Write a CPU profile to the specified file before exiting.")
	c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.")
	c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
	c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
	c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")
	c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. If list is 'off', 'go test' does not run 'go vet' at all.")

	if mode == runMode || mode == watchMode {
		config.Flags(c.FlagSet, "", false)
		c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
		c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
		c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
		c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
		if !onWindows {
			c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
		}
		c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
		c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout")
	}

	if mode == runMode {
		c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
		c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
		c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
	}

	if mode == watchMode {
		c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
		c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes")
	}
}

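The `-p` heuristic in `computeNodes` above uses every CPU on machines with four or fewer cores and leaves one core free on larger ones. The same rule in isolation:

// The -p node-count heuristic from computeNodes above, in isolation.
package main

import (
	"fmt"
	"runtime"
)

func autoNodes() int {
	n := runtime.NumCPU()
	if n <= 4 {
		return n // small machine: use every core
	}
	return n - 1 // larger machine: leave one core for the host
}

func main() {
	fmt.Println("parallel nodes:", autoNodes())
}
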
@ -1,173 +0,0 @@
package main

import (
	"fmt"
	"runtime"
	"sync"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
	"github.com/onsi/ginkgo/ginkgo/testrunner"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
)

type compilationInput struct {
	runner *testrunner.TestRunner
	result chan compilationOutput
}

type compilationOutput struct {
	runner *testrunner.TestRunner
	err    error
}

type SuiteRunner struct {
	notifier         *Notifier
	interruptHandler *interrupthandler.InterruptHandler
}

func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
	return &SuiteRunner{
		notifier:         notifier,
		interruptHandler: interruptHandler,
	}
}

func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
	//we return this to the consumer, it will return each runner in order as it compiles
	compilationOutputs := make(chan compilationOutput, len(runners))

	//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
	//we read from these channels in order to ensure we run the suites in order
	orderedCompilationOutputs := []chan compilationOutput{}
	for range runners {
		orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
	}

	//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
	//we prefill the channel then close it, this ensures we compile things in the correct order
	workPool := make(chan compilationInput, len(runners))
	for i, runner := range runners {
		workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
	}
	close(workPool)

	//pick a reasonable numCompilers
	if numCompilers == 0 {
		numCompilers = runtime.NumCPU()
	}

	//a WaitGroup to help us wait for all compilers to shut down
	wg := &sync.WaitGroup{}
	wg.Add(numCompilers)

	//spin up the concurrent compilers
	for i := 0; i < numCompilers; i++ {
		go func() {
			defer wg.Done()
			for input := range workPool {
				if r.interruptHandler.WasInterrupted() {
					return
				}

				if willCompile != nil {
					willCompile(input.runner.Suite)
				}

				//We retry because Go sometimes steps on itself when multiple compiles happen in parallel.  This is ugly, but should help resolve flakiness...
				var err error
				retries := 0
				for retries <= 5 {
					if r.interruptHandler.WasInterrupted() {
						return
					}
					if err = input.runner.Compile(); err == nil {
						break
					}
					retries++
				}

				input.result <- compilationOutput{input.runner, err}
			}
		}()
	}

	//read from the compilation output channels *in order* and send them to the caller
	//close the compilationOutputs channel to tell the caller we're done
	go func() {
		defer close(compilationOutputs)
		for _, orderedCompilationOutput := range orderedCompilationOutputs {
			select {
			case compilationOutput := <-orderedCompilationOutput:
				compilationOutputs <- compilationOutput
			case <-r.interruptHandler.C:
				//interrupt detected, wait for the compilers to shut down then bail
				//this ensures we clean up after ourselves as we don't leave any compilation processes running
				wg.Wait()
				return
			}
		}
	}()

	return compilationOutputs
}

func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
	runResult := testrunner.PassingRunResult()

	compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)

	numSuitesThatRan := 0
	suitesThatFailed := []testsuite.TestSuite{}
	for compilationOutput := range compilationOutputs {
		if compilationOutput.err != nil {
			fmt.Print(compilationOutput.err.Error())
		}
		numSuitesThatRan++
		suiteRunResult := testrunner.FailingRunResult()
		if compilationOutput.err == nil {
			suiteRunResult = compilationOutput.runner.Run()
		}
		r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
		r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
		runResult = runResult.Merge(suiteRunResult)
		if !suiteRunResult.Passed {
			suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
			if !keepGoing {
				break
			}
		}
		if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
			fmt.Println("")
		}
	}

	if keepGoing && !runResult.Passed {
		r.listFailedSuites(suitesThatFailed)
	}

	return runResult, numSuitesThatRan
}

func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
	fmt.Println("")
	fmt.Println("There were failures detected in the following suites:")

	maxPackageNameLength := 0
	for _, suite := range suitesThatFailed {
		if len(suite.PackageName) > maxPackageNameLength {
			maxPackageNameLength = len(suite.PackageName)
		}
	}

	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)

	for _, suite := range suitesThatFailed {
		if config.DefaultReporterConfig.NoColor {
			fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
		} else {
			fmt.Fprintf(colorable.NewColorableStdout(), "\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
		}
	}
}

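The comments in `compileInParallel` above describe an ordered fan-out: workers drain a prefilled, closed channel concurrently, but each job carries its own buffered result channel, and results are read back in submission order. A minimal standalone sketch of the pattern:

// Minimal sketch of the ordered fan-out pattern used by compileInParallel.
package main

import "fmt"

type job struct {
	id     int
	result chan int
}

func main() {
	n := 5
	ordered := make([]chan int, n)
	work := make(chan job, n)
	for i := 0; i < n; i++ {
		ordered[i] = make(chan int, 1)
		work <- job{id: i, result: ordered[i]}
	}
	close(work) // prefilled and closed: workers drain it and exit

	// three concurrent "compilers"
	for w := 0; w < 3; w++ {
		go func() {
			for j := range work {
				j.result <- j.id * j.id // stand-in for Compile()
			}
		}()
	}

	// read results strictly in submission order, as RunSuites does
	for i := 0; i < n; i++ {
		fmt.Println(<-ordered[i])
	}
}
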
@ -1,7 +0,0 @@
// +build go1.10

package testrunner

var (
	buildArgs = []string{"test", "-c"}
)

@ -1,7 +0,0 @@
// +build !go1.10

package testrunner

var (
	buildArgs = []string{"test", "-c", "-i"}
)

@ -1,52 +0,0 @@
package testrunner

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
)

type logWriter struct {
	buffer *bytes.Buffer
	lock   *sync.Mutex
	log    *log.Logger
}

func newLogWriter(target io.Writer, node int) *logWriter {
	return &logWriter{
		buffer: &bytes.Buffer{},
		lock:   &sync.Mutex{},
		log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
	}
}

func (w *logWriter) Write(data []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.buffer.Write(data)
	contents := w.buffer.String()

	lines := strings.Split(contents, "\n")
	for _, line := range lines[0 : len(lines)-1] {
		w.log.Println(line)
	}

	w.buffer.Reset()
	w.buffer.Write([]byte(lines[len(lines)-1]))
	return len(data), nil
}

func (w *logWriter) Close() error {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.buffer.Len() > 0 {
		w.log.Println(w.buffer.String())
	}

	return nil
}

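`logWriter.Write` above flushes only complete lines (with the `[node]` prefix) and buffers the trailing fragment until the next `Write` or `Close`. A standalone sketch of that split:

// Sketch of the line-buffering rule in logWriter.Write above.
package main

import (
	"fmt"
	"strings"
)

func main() {
	buffered := "partial line, then " // left over from a previous Write
	incoming := "more\nanother full line\ntail without newline"

	contents := buffered + incoming
	lines := strings.Split(contents, "\n")
	for _, line := range lines[:len(lines)-1] {
		fmt.Println("[1] " + line) // complete lines get the node prefix
	}
	remainder := lines[len(lines)-1] // kept for the next Write or Close
	fmt.Printf("(buffered: %q)\n", remainder)
}
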
@ -1,27 +0,0 @@
package testrunner

type RunResult struct {
	Passed               bool
	HasProgrammaticFocus bool
}

func PassingRunResult() RunResult {
	return RunResult{
		Passed:               true,
		HasProgrammaticFocus: false,
	}
}

func FailingRunResult() RunResult {
	return RunResult{
		Passed:               false,
		HasProgrammaticFocus: false,
	}
}

func (r RunResult) Merge(o RunResult) RunResult {
	return RunResult{
		Passed:               r.Passed && o.Passed,
		HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
	}
}

@ -1,554 +0,0 @@
|
|||
package testrunner

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
	"github.com/onsi/ginkgo/internal/remote"
	"github.com/onsi/ginkgo/reporters/stenographer"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	"github.com/onsi/ginkgo/types"
)

type TestRunner struct {
	Suite testsuite.TestSuite

	compiled              bool
	compilationTargetPath string

	numCPU         int
	parallelStream bool
	timeout        time.Duration
	goOpts         map[string]interface{}
	additionalArgs []string
	stderr         *bytes.Buffer

	CoverageFile string
}

func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
	runner := &TestRunner{
		Suite:          suite,
		numCPU:         numCPU,
		parallelStream: parallelStream,
		goOpts:         goOpts,
		additionalArgs: additionalArgs,
		timeout:        timeout,
		stderr:         new(bytes.Buffer),
	}

	if !suite.Precompiled {
		runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
	}

	return runner
}

func (t *TestRunner) Compile() error {
	return t.CompileTo(t.compilationTargetPath)
}

func (t *TestRunner) BuildArgs(path string) []string {
	args := make([]string, len(buildArgs), len(buildArgs)+3)
	copy(args, buildArgs)
	args = append(args, "-o", path, t.Suite.Path)

	if t.getCoverMode() != "" {
		args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
	} else {
		if t.shouldCover() || t.getCoverPackage() != "" {
			args = append(args, "-cover", "-covermode=atomic")
		}
	}

	boolOpts := []string{
		"a",
		"n",
		"msan",
		"race",
		"x",
		"work",
		"linkshared",
	}

	for _, opt := range boolOpts {
		if s, found := t.goOpts[opt].(*bool); found && *s {
			args = append(args, fmt.Sprintf("-%s", opt))
		}
	}

	intOpts := []string{
		"memprofilerate",
		"blockprofilerate",
	}

	for _, opt := range intOpts {
		if s, found := t.goOpts[opt].(*int); found {
			args = append(args, fmt.Sprintf("-%s=%d", opt, *s))
		}
	}

	stringOpts := []string{
		"asmflags",
		"buildmode",
		"compiler",
		"gccgoflags",
		"installsuffix",
		"ldflags",
		"pkgdir",
		"toolexec",
		"coverprofile",
		"cpuprofile",
		"memprofile",
		"outputdir",
		"coverpkg",
		"tags",
		"gcflags",
		"vet",
		"mod",
	}

	for _, opt := range stringOpts {
		if s, found := t.goOpts[opt].(*string); found && *s != "" {
			args = append(args, fmt.Sprintf("-%s=%s", opt, *s))
		}
	}
	return args
}
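BuildArgs starts from the package-level buildArgs slice (defined elsewhere in this package; essentially a `go test -c`-style prefix) and layers the output path, suite path, coverage flags, and any pass-through go build options on top. An illustrative result, assuming buildArgs is {"test", "-c"} and the user asked for -cover with no explicit covermode:

	args := runner.BuildArgs("/tmp/mypkg.test")
	// args ≈ [test -c -o /tmp/mypkg.test ./mypkg -cover -covermode=atomic]
	// the compile step then runs: go test -c -o /tmp/mypkg.test ./mypkg -cover -covermode=atomic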
func (t *TestRunner) CompileTo(path string) error {
	if t.compiled {
		return nil
	}

	if t.Suite.Precompiled {
		return nil
	}

	args := t.BuildArgs(path)
	cmd := exec.Command("go", args...)

	output, err := cmd.CombinedOutput()

	if err != nil {
		if len(output) > 0 {
			return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output)
		}
		return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
	}

	if len(output) > 0 {
		fmt.Println(string(output))
	}

	if !fileExists(path) {
		compiledFile := t.Suite.PackageName + ".test"
		if fileExists(compiledFile) {
			// seems like we are on an old go version that does not support the -o flag on go test
			// move the compiled test file to the desired location by hand
			err = os.Rename(compiledFile, path)
			if err != nil {
				// We cannot move the file, perhaps because the source and destination
				// are on different partitions. We can copy the file, however.
				err = copyFile(compiledFile, path)
				if err != nil {
					return fmt.Errorf("Failed to copy compiled file: %s", err)
				}
			}
		} else {
			return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
		}
	}

	t.compiled = true

	return nil
}

func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil || !os.IsNotExist(err)
}

// copyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFile(src, dst string) error {
	srcInfo, err := os.Stat(src)
	if err != nil {
		return err
	}
	mode := srcInfo.Mode()

	in, err := os.Open(src)
	if err != nil {
		return err
	}

	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}

	defer func() {
		closeErr := out.Close()
		if err == nil {
			err = closeErr
		}
	}()

	_, err = io.Copy(out, in)
	if err != nil {
		return err
	}

	err = out.Sync()
	if err != nil {
		return err
	}

	return out.Chmod(mode)
}
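One subtlety in copyFile: the deferred handler assigns the Close error to err, but the function does not declare a named return value, so by the time the deferred function runs, `return out.Chmod(mode)` has already fixed the result and the assignment is lost. A sketch of the same logic with a named return, which actually surfaces the Close error (the name copyFileChecked is illustrative, not part of the deleted file):

	func copyFileChecked(src, dst string) (err error) {
		srcInfo, err := os.Stat(src)
		if err != nil {
			return err
		}

		in, err := os.Open(src)
		if err != nil {
			return err
		}
		defer in.Close()

		out, err := os.Create(dst)
		if err != nil {
			return err
		}
		defer func() {
			// err is the named return value, so this assignment changes
			// what the caller sees even after the return statement ran.
			if closeErr := out.Close(); err == nil {
				err = closeErr
			}
		}()

		if _, err = io.Copy(out, in); err != nil {
			return err
		}
		if err = out.Sync(); err != nil {
			return err
		}
		return out.Chmod(srcInfo.Mode())
	}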
func (t *TestRunner) Run() RunResult {
	if t.Suite.IsGinkgo {
		if t.numCPU > 1 {
			if t.parallelStream {
				return t.runAndStreamParallelGinkgoSuite()
			} else {
				return t.runParallelGinkgoSuite()
			}
		} else {
			return t.runSerialGinkgoSuite()
		}
	} else {
		return t.runGoTestSuite()
	}
}

func (t *TestRunner) CleanUp() {
	if t.Suite.Precompiled {
		return
	}
	os.Remove(t.compilationTargetPath)
}

func (t *TestRunner) runSerialGinkgoSuite() RunResult {
	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
}

func (t *TestRunner) runGoTestSuite() RunResult {
	return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
}

func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}

	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		writers[cpu] = newLogWriter(os.Stdout, cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	for _, writer := range writers {
		writer.Close()
	}

	os.Stdout.Sync()

	if t.shouldCombineCoverprofiles() {
		t.combineCoverprofiles()
	}

	return res
}
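Note how each loop iteration rewrites the package-global config.GinkgoConfig before calling BuildFlagArgs: the globals act as a staging area, and BuildFlagArgs snapshots them into a fresh per-node flag list (e.g. --ginkgo.parallel.node=3 --ginkgo.parallel.total=4; spellings per ginkgo v1's config package), so each child process still receives its own node number even though the config struct is shared.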
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
	result := make(chan bool)
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)
	reports := make([]*bytes.Buffer, t.numCPU)

	stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
	aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}
	server.RegisterReporters(aggregator)
	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()
		config.GinkgoConfig.StreamHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		reports[cpu] = &bytes.Buffer{}
		writers[cpu] = newLogWriter(reports[cpu], cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	//all test processes are done, at this point
	//we should be able to wait for the aggregator to tell us that it's done

	select {
	case <-result:
		fmt.Println("")
	case <-time.After(time.Second):
		//the aggregator never got back to us! something must have gone wrong
		fmt.Println(`
 -------------------------------------------------------------------
|                                                                   |
|  Ginkgo timed out waiting for all parallel nodes to report back!  |
|                                                                   |
 -------------------------------------------------------------------`)
		fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path)
		os.Stdout.Sync()

		for _, writer := range writers {
			writer.Close()
		}

		for _, report := range reports {
			fmt.Print(report.String())
		}

		os.Stdout.Sync()
	}

	if t.shouldCombineCoverprofiles() {
		t.combineCoverprofiles()
	}

	return res
}

const CoverProfileSuffix = ".coverprofile"

func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
	args := []string{"--test.timeout=" + t.timeout.String()}

	coverProfile := t.getCoverProfile()

	if t.shouldCombineCoverprofiles() {

		testCoverProfile := "--test.coverprofile="

		coverageFile := ""
		// Set default name for coverage results
		if coverProfile == "" {
			coverageFile = t.Suite.PackageName + CoverProfileSuffix
		} else {
			coverageFile = coverProfile
		}

		testCoverProfile += coverageFile

		t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile)

		if t.numCPU > 1 {
			testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node)
		}
		args = append(args, testCoverProfile)
	}

	args = append(args, ginkgoArgs...)
	args = append(args, t.additionalArgs...)

	path := t.compilationTargetPath
	if t.Suite.Precompiled {
		path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
	}

	cmd := exec.Command(path, args...)

	cmd.Dir = t.Suite.Path
	cmd.Stderr = io.MultiWriter(stream, t.stderr)
	cmd.Stdout = stream

	return cmd
}
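Putting it together, for node 2 of a 4-node coverage run of a suite named mypkg, the assembled invocation looks roughly like this (flag spellings follow ginkgo v1's --test.* and --ginkgo.* conventions; the host and port are illustrative):

	./mypkg.test --test.timeout=1h0m0s \
		--test.coverprofile=mypkg.coverprofile.2 \
		--ginkgo.parallel.node=2 --ginkgo.parallel.total=4 \
		--ginkgo.parallel.synchost=http://127.0.0.1:35771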
func (t *TestRunner) shouldCover() bool {
	return *t.goOpts["cover"].(*bool)
}

func (t *TestRunner) shouldRequireSuite() bool {
	return *t.goOpts["requireSuite"].(*bool)
}

func (t *TestRunner) getCoverProfile() string {
	return *t.goOpts["coverprofile"].(*string)
}

func (t *TestRunner) getCoverPackage() string {
	return *t.goOpts["coverpkg"].(*string)
}

func (t *TestRunner) getCoverMode() string {
	return *t.goOpts["covermode"].(*string)
}

func (t *TestRunner) shouldCombineCoverprofiles() bool {
	return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != ""
}

func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
	var res RunResult

	defer func() {
		if completions != nil {
			completions <- res
		}
	}()

	err := cmd.Start()
	if err != nil {
		fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
		return res
	}

	cmd.Wait()

	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)

	if strings.Contains(t.stderr.String(), "warning: no tests to run") {
		if t.shouldRequireSuite() {
			res.Passed = false
		}
		fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
	}

	return res
}
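run maps the child process's exit status onto a RunResult: 0 is a clean pass; types.GINKGO_FOCUS_EXIT_CODE (197 in ginkgo v1) means every spec passed but the suite contains programmatically focused specs (FIt, FDescribe, ...), which is surfaced separately so callers can still fail the build; any other status is a failure. A consuming sketch:

	res := t.run(cmd, nil)
	if res.Passed && res.HasProgrammaticFocus {
		// specs passed, but focus was left in the tree; the ginkgo CLI
		// uses this signal to exit non-zero on CI-style runs.
	}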
func (t *TestRunner) combineCoverprofiles() {
	profiles := []string{}

	coverProfile := t.getCoverProfile()

	for cpu := 1; cpu <= t.numCPU; cpu++ {
		var coverFile string
		if coverProfile == "" {
			coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu)
		} else {
			coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu)
		}

		coverFile = filepath.Join(t.Suite.Path, coverFile)
		coverProfile, err := ioutil.ReadFile(coverFile)
		os.Remove(coverFile)

		if err == nil {
			profiles = append(profiles, string(coverProfile))
		}
	}

	if len(profiles) != t.numCPU {
		return
	}

	lines := map[string]int{}
	lineOrder := []string{}
	for i, coverProfile := range profiles {
		for _, line := range strings.Split(coverProfile, "\n")[1:] {
			if len(line) == 0 {
				continue
			}
			components := strings.Split(line, " ")
			count, _ := strconv.Atoi(components[len(components)-1])
			prefix := strings.Join(components[0:len(components)-1], " ")
			lines[prefix] += count
			if i == 0 {
				lineOrder = append(lineOrder, prefix)
			}
		}
	}

	output := []string{"mode: atomic"}
	for _, line := range lineOrder {
		output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
	}
	finalOutput := strings.Join(output, "\n")

	finalFilename := ""

	if coverProfile != "" {
		finalFilename = coverProfile
	} else {
		finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix)
	}

	coverageFilepath := filepath.Join(t.Suite.Path, finalFilename)
	ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666)

	t.CoverageFile = coverageFilepath
}
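Each per-node profile starts with a `mode:` header; every other line is a code-block key followed by a statement count and a trailing hit count. The merge keeps node 1's block order and sums the trailing hit counts per block. A worked example for two nodes (file contents illustrative):

	mypkg.coverprofile.1:       mypkg.coverprofile.2:
	mode: atomic                mode: atomic
	mypkg/a.go:5.2,7.3 2 1      mypkg/a.go:5.2,7.3 2 0
	mypkg/a.go:9.2,9.10 1 0     mypkg/a.go:9.2,9.10 1 3

	merged mypkg.coverprofile:
	mode: atomic
	mypkg/a.go:5.2,7.3 2 1
	mypkg/a.go:9.2,9.10 1 3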
@@ -1,115 +0,0 @@
package testsuite

import (
	"errors"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

type TestSuite struct {
	Path        string
	PackageName string
	IsGinkgo    bool
	Precompiled bool
}

func PrecompiledTestSuite(path string) (TestSuite, error) {
	info, err := os.Stat(path)
	if err != nil {
		return TestSuite{}, err
	}

	if info.IsDir() {
		return TestSuite{}, errors.New("this is a directory, not a file")
	}

	if filepath.Ext(path) != ".test" {
		return TestSuite{}, errors.New("this is not a .test binary")
	}

	if info.Mode()&0111 == 0 {
		return TestSuite{}, errors.New("this is not executable")
	}

	dir := relPath(filepath.Dir(path))
	packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))

	return TestSuite{
		Path:        dir,
		PackageName: packageName,
		IsGinkgo:    true,
		Precompiled: true,
	}, nil
}
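PrecompiledTestSuite wraps an already-built test binary (e.g. from `ginkgo build` or `go test -c`) after three sanity checks: it must be a regular file, carry the .test extension, and have an execute bit set. A usage sketch (path illustrative):

	suite, err := testsuite.PrecompiledTestSuite("./mypkg/mypkg.test")
	if err != nil {
		return err // e.g. "this is not a .test binary"
	}
	// suite.PackageName == "mypkg"; Precompiled and IsGinkgo are both forced to true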
func SuitesInDir(dir string, recurse bool) []TestSuite {
	suites := []TestSuite{}

	if vendorExperimentCheck(dir) {
		return suites
	}

	files, _ := ioutil.ReadDir(dir)
	re := regexp.MustCompile(`^[^._].*_test\.go$`)
	for _, file := range files {
		if !file.IsDir() && re.Match([]byte(file.Name())) {
			suites = append(suites, New(dir, files))
			break
		}
	}

	if recurse {
		re = regexp.MustCompile(`^[._]`)
		for _, file := range files {
			if file.IsDir() && !re.Match([]byte(file.Name())) {
				suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
			}
		}
	}

	return suites
}

func relPath(dir string) string {
	dir, _ = filepath.Abs(dir)
	cwd, _ := os.Getwd()
	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))

	if string(dir[0]) != "." {
		dir = "." + string(filepath.Separator) + dir
	}

	return dir
}

func New(dir string, files []os.FileInfo) TestSuite {
	return TestSuite{
		Path:        relPath(dir),
		PackageName: packageNameForSuite(dir),
		IsGinkgo:    filesHaveGinkgoSuite(dir, files),
	}
}

func packageNameForSuite(dir string) string {
	path, _ := filepath.Abs(dir)
	return filepath.Base(path)
}

func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
	reTestFile := regexp.MustCompile(`_test\.go$`)
	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)

	for _, file := range files {
		if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
			contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
			if reGinkgo.Match(contents) {
				return true
			}
		}
	}

	return false
}
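The IsGinkgo detection in filesHaveGinkgoSuite is purely textual: a directory counts as a Ginkgo suite as soon as any _test.go file matches `package ginkgo` or an import path ending in /ginkgo", so a single dot-import line such as `. "github.com/onsi/ginkgo"` in one test file is enough to flag the whole directory.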
@@ -1,16 +0,0 @@
// +build !go1.6

package testsuite

import (
	"os"
	"path"
)

// "This change will only be enabled if the go command is run with
// GO15VENDOREXPERIMENT=1 in its environment."
// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
func vendorExperimentCheck(dir string) bool {
	vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
	return vendorExperiment == "1" && path.Base(dir) == "vendor"
}
@@ -1,15 +0,0 @@
// +build go1.6

package testsuite

import (
	"os"
	"path"
)

// In Go 1.6 the vendor directory became the default behaviour, so now
// check whether it has been disabled.
func vendorExperimentCheck(dir string) bool {
	vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
	return vendorExperiment != "0" && path.Base(dir) == "vendor"
}
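These two files are mutually exclusive via build tags: pre-1.6 toolchains compile the `!go1.6` variant, where vendoring is opt-in (GO15VENDOREXPERIMENT=1), while 1.6+ toolchains compile the `go1.6` variant, where vendoring is opt-out (GO15VENDOREXPERIMENT=0). Either way the effect is the same: when vendoring is active, SuitesInDir returns early for `vendor` directories so vendored packages' tests are not picked up as suites.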