build(deps): bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0
Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.6.1 to 2.7.0.
- [Release notes](https://github.com/onsi/ginkgo/releases)
- [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/ginkgo/compare/v2.6.1...v2.7.0)

---
updated-dependencies:
- dependency-name: github.com/onsi/ginkgo/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent 813dc7ddad
commit a5bc34d2e4
diff --git a/go.mod b/go.mod
@@ -24,7 +24,7 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/jinzhu/copier v0.3.5
 	github.com/json-iterator/go v1.1.12
-	github.com/onsi/ginkgo/v2 v2.6.1
+	github.com/onsi/ginkgo/v2 v2.7.0
 	github.com/onsi/gomega v1.24.2
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2
diff --git a/go.sum b/go.sum
@@ -1386,8 +1386,9 @@ github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7
 github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
 github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
 github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
-github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q=
 github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,16 @@
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timedout specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
 ## 2.6.1
 
 ### Features
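The ContinueOnFailure feature called out above changes how Ordered containers react to a failed spec. A minimal usage sketch, assuming a standard Ginkgo v2 suite bootstrap (the suite and spec names here are illustrative, not taken from this commit):

```go
package workflow_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestWorkflow(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Workflow Suite")
}

// With plain Ordered, the first failure skips every remaining spec in the
// container. Adding ContinueOnFailure lets the remaining specs run anyway.
var _ = Describe("a multi-step workflow", Ordered, ContinueOnFailure, func() {
	It("runs step one", func() {
		Expect(1 + 1).To(Equal(2))
	})
	It("runs step two even if step one failed", func() {
		Expect(true).To(BeTrue())
	})
})
```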
diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -46,7 +46,7 @@ const Pending = internal.Pending
 
 /*
 Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
-Tests in ordered containers cannot be marked as serial - mark the ordered container instead.
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
 
 You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
 You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
@@ -54,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
 const Serial = internal.Serial
 
 /*
-Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
 They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
 
 You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
@@ -62,6 +62,16 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
 */
 const Ordered = internal.Ordered
 
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run as the precondition for the Ordered container will consider to be failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
 /*
 OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
 per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
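To make the OncePerOrdered caveat in the ContinueOnFailure docs concrete, here is a hedged sketch (container text and variable names invented for illustration; assumes the suite bootstrap from the earlier example). A BeforeEach marked OncePerOrdered wraps the Ordered container once; if it fails, the container's precondition is considered failed and no specs run, ContinueOnFailure or not:

```go
var _ = Describe("outer container", func() {
	setupCalls := 0

	// OncePerOrdered: runs once around the whole Ordered container below,
	// not before each individual spec inside it.
	BeforeEach(OncePerOrdered, func() {
		setupCalls++
	})

	Describe("ordered work", Ordered, ContinueOnFailure, func() {
		It("sees one setup call", func() { Expect(setupCalls).To(Equal(1)) })
		It("still sees one setup call", func() { Expect(setupCalls).To(Equal(1)) })
	})
})
```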
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"regexp"
+	"strconv"
 	"strings"
 )
 
@@ -50,14 +51,36 @@ func NewWithNoColorBool(noColor bool) Formatter {
 }
 
 func New(colorMode ColorMode) Formatter {
+	colorAliases := map[string]int{
+		"black":   0,
+		"red":     1,
+		"green":   2,
+		"yellow":  3,
+		"blue":    4,
+		"magenta": 5,
+		"cyan":    6,
+		"white":   7,
+	}
+	for colorAlias, n := range colorAliases {
+		colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
+	}
+
 	getColor := func(color, defaultEscapeCode string) string {
 		color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
 		envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
-		if escapeCode := os.Getenv(envVar); escapeCode != "" {
-			return escapeCode
-		}
+		envVarColor := os.Getenv(envVar)
+		if envVarColor == "" {
 			return defaultEscapeCode
 		}
+		if colorCode, ok := colorAliases[envVarColor]; ok {
+			return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+		}
+		colorCode, err := strconv.Atoi(envVarColor)
+		if err != nil || colorCode < 0 || colorCode > 255 {
+			return defaultEscapeCode
+		}
+		return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+	}
 
 	f := Formatter{
 		ColorMode: colorMode,
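With this change, a `GINKGO_CLI_COLOR_*` environment variable can hold a color alias ("magenta", "bright-cyan", ...) or a raw 256-color code, not just a full escape sequence. A standalone sketch of the same resolution logic (the function name is mine, not the library's):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// resolveColor mirrors the lookup this diff adds to formatter.New: the env
// var may hold an alias ("bright-red") or a 256-color code ("203"); anything
// unrecognized falls back to the default escape code.
func resolveColor(color, defaultEscapeCode string, aliases map[string]int) string {
	color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
	v := os.Getenv(fmt.Sprintf("GINKGO_CLI_COLOR_%s", color))
	if v == "" {
		return defaultEscapeCode
	}
	if code, ok := aliases[v]; ok {
		return fmt.Sprintf("\x1b[38;5;%dm", code)
	}
	code, err := strconv.Atoi(v)
	if err != nil || code < 0 || code > 255 {
		return defaultEscapeCode
	}
	return fmt.Sprintf("\x1b[38;5;%dm", code)
}

func main() {
	aliases := map[string]int{"red": 1, "bright-red": 9}
	os.Setenv("GINKGO_CLI_COLOR_RED", "bright-red")
	fmt.Printf("%q\n", resolveColor("red", "\x1b[0;31m", aliases)) // "\x1b[38;5;9m"
}
```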
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -95,6 +95,8 @@ type group struct {
 	runOnceTracker map[runOncePair]types.SpecState
 
 	succeeded bool
+	failedInARunOnceBefore bool
+	continueOnFailure      bool
 }
 
 func newGroup(suite *Suite) *group {
@@ -103,6 +105,8 @@ func newGroup(suite *Suite) *group {
 		runOncePairs:   map[uint]runOncePairs{},
 		runOnceTracker: map[runOncePair]types.SpecState{},
 		succeeded:      true,
+		failedInARunOnceBefore: false,
+		continueOnFailure:      false,
 	}
 }
 
@@ -137,10 +141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
 	if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
 		return types.SpecStateSkipped, types.Failure{}
 	}
-	if !g.succeeded {
+	if !g.succeeded && !g.continueOnFailure {
 		return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
 			"Spec skipped because an earlier spec in an ordered container failed")
 	}
+	if g.failedInARunOnceBefore && g.continueOnFailure {
+		return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+			"Spec skipped because a BeforeAll node failed")
+	}
 	beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
 	for _, pair := range beforeOncePairs {
 		if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
@@ -168,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
 	return lastSpecID == specID
 }
 
-func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+	failedInARunOnceBefore := false
 	pairs := g.runOncePairs[spec.SubjectID()]
 
 	nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
@@ -194,6 +203,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
 		}
 		if g.suite.currentSpecReport.State != types.SpecStatePassed {
 			terminatingNode, terminatingPair = node, oncePair
+			failedInARunOnceBefore = !terminatingPair.isZero()
 			break
 		}
 	}
@@ -216,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
 			//this node has already been run on this attempt, don't rerun it
 			return false
 		}
-		pair := runOncePair{}
+		var pair runOncePair
 		switch node.NodeType {
 		case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
 			// check if we were generated in an AfterNode that has already run
@@ -246,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
 			if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
 				return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
 			}
-		case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
+		case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
 			if isFinalAttempt {
-				return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
+				if g.continueOnFailure {
+					return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+				} else {
+					return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+				}
 			}
 			if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
 				if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
@@ -281,10 +295,12 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) {
 			includeDeferCleanups = true
 		}
 
+	return failedInARunOnceBefore
 }
 
 func (g *group) run(specs Specs) {
 	g.specs = specs
+	g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
 	for _, spec := range g.specs {
 		g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
 	}
@@ -301,8 +317,9 @@ func (g *group) run(specs Specs) {
 		skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
 
 		g.suite.currentSpecReport.StartTime = time.Now()
+		failedInARunOnceBefore := false
 		if !skip {
 			var maxAttempts = 1
 
 			if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
@@ -327,7 +343,7 @@ func (g *group) run(specs Specs) {
 				}
 			}
 
-			g.attemptSpec(attempt == maxAttempts-1, spec)
+			failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
 
 			g.suite.currentSpecReport.EndTime = time.Now()
 			g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
@@ -355,6 +371,7 @@ func (g *group) run(specs Specs) {
 		g.suite.processCurrentSpecReport()
 		if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
 			g.succeeded = false
+			g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
 		}
 		g.suite.selectiveLock.Lock()
 		g.suite.currentSpecReport = types.SpecReport{}
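The two new group fields track distinct skip reasons: with continueOnFailure set, a failed spec no longer skips its ordered siblings, but a failure in a run-once Before node (failedInARunOnceBefore) still does. A hedged behavioral sketch of what that means for a suite (spec text invented; the failure messages in the comments are quoted from the diff above):

```go
var _ = Describe("ordered with ContinueOnFailure", Ordered, ContinueOnFailure, func() {
	BeforeAll(func() {
		// If this BeforeAll failed, every spec below would be skipped with
		// "Spec skipped because a BeforeAll node failed".
	})

	It("may fail", func() {
		Fail("boom") // with ContinueOnFailure this no longer skips the next spec
	})

	It("still runs", func() {
		// without ContinueOnFailure this would be skipped with
		// "Spec skipped because an earlier spec in an ordered container failed"
	})
})
```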
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -51,6 +51,7 @@ type Node struct {
 	MarkedPending           bool
 	MarkedSerial            bool
 	MarkedOrdered           bool
+	MarkedContinueOnFailure bool
 	MarkedOncePerOrdered    bool
 	FlakeAttempts           int
 	MustPassRepeatedly      int
@@ -69,6 +70,7 @@ type focusType bool
 type pendingType bool
 type serialType bool
 type orderedType bool
+type continueOnFailureType bool
 type honorsOrderedType bool
 type suppressProgressReporting bool
 
@@ -76,6 +78,7 @@ const Focus = focusType(true)
 const Pending = pendingType(true)
 const Serial = serialType(true)
 const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
 const OncePerOrdered = honorsOrderedType(true)
 const SuppressProgressReporting = suppressProgressReporting(true)
 
@@ -133,6 +136,8 @@ func isDecoration(arg interface{}) bool {
 		return true
 	case t == reflect.TypeOf(Ordered):
 		return true
+	case t == reflect.TypeOf(ContinueOnFailure):
+		return true
 	case t == reflect.TypeOf(OncePerOrdered):
 		return true
 	case t == reflect.TypeOf(SuppressProgressReporting):
@@ -241,6 +246,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 			if !nodeType.Is(types.NodeTypeContainer) {
 				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
 			}
+		case t == reflect.TypeOf(ContinueOnFailure):
+			node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+			if !nodeType.Is(types.NodeTypeContainer) {
+				appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+			}
 		case t == reflect.TypeOf(OncePerOrdered):
 			node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
 			if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
@@ -386,6 +396,10 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
 	}
 
+	if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+		appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+	}
+
 	hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
 
 	if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -7,6 +7,58 @@ import (
 	"github.com/onsi/ginkgo/v2/types"
 )
 
+type SortableSpecs struct {
+	Specs   Specs
+	Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+	indexes := make([]int, len(specs))
+	for i := range specs {
+		indexes[i] = i
+	}
+	return &SortableSpecs{
+		Specs:   specs,
+		Indexes: indexes,
+	}
+}
+func (s *SortableSpecs) Len() int      { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+	a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+	firstOrderedA := a.Nodes.FirstNodeMarkedOrdered()
+	firstOrderedB := b.Nodes.FirstNodeMarkedOrdered()
+	if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() {
+		// strictly preserve order in ordered containers. ID will track this as IDs are generated monotonically
+		return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID
+	}
+
+	aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
+	bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
+	for i := 0; i < len(aCLs) && i < len(bCLs); i++ {
+		aCL, bCL := aCLs[i], bCLs[i]
+		if aCL.FileName < bCL.FileName {
+			return true
+		} else if aCL.FileName > bCL.FileName {
+			return false
+		}
+		if aCL.LineNumber < bCL.LineNumber {
+			return true
+		} else if aCL.LineNumber > bCL.LineNumber {
+			return false
+		}
+	}
+	// either everything is equal or we have different lengths of CLs
+	if len(aCLs) < len(bCLs) {
+		return true
+	} else if len(aCLs) > len(bCLs) {
+		return false
+	}
+	// ok, now we are sure everything was equal. so we use the spec text to break ties
+	return a.Text() < b.Text()
+}
+
 type GroupedSpecIndices []SpecIndices
 type SpecIndices []int
 
@@ -28,12 +80,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 	// Seed a new random source based on thee configured random seed.
 	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
 
-	// first break things into execution groups
+	// first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+	sortableSpecs := NewSortableSpecs(specs)
+	sort.Sort(sortableSpecs)
+
+	// then we break things into execution groups
 	// a group represents a single unit of execution and is a collection of SpecIndices
 	// usually a group is just a single spec, however ordered containers must be preserved as a single group
 	executionGroupIDs := []uint{}
 	executionGroups := map[uint]SpecIndices{}
-	for idx, spec := range specs {
+	for _, idx := range sortableSpecs.Indexes {
+		spec := specs[idx]
 		groupNode := spec.Nodes.FirstNodeMarkedOrdered()
 		if groupNode.IsZero() {
 			groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
@@ -48,7 +105,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 	// we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
 	shufflableGroupingIDs := []uint{}
 	shufflableGroupingIDToGroupIDs := map[uint][]uint{}
-	shufflableGroupingsIDToSortKeys := map[uint]string{}
 
 	// for each execution group we're going to have to pick a node to represent how the
 	// execution group is grouped for shuffling:
@@ -57,7 +113,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 		nodeTypesToShuffle = types.NodeTypeIt
 	}
 
-	//so, fo reach execution group:
+	//so, for each execution group:
 	for _, groupID := range executionGroupIDs {
 		// pick out a representative spec
 		representativeSpec := specs[executionGroups[groupID][0]]
@@ -72,22 +128,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 		if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
 			// record the shuffleable group ID
 			shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
-			// and record the sort key to use
-			shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String()
 		}
 	}
 
-	// now we sort the shufflable groups by the sort key. We use the shufflable group nodes code location and break ties using its node id
-	sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool {
-		keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]]
-		keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]]
-		if keyA == keyB {
-			return shufflableGroupingIDs[i] < shufflableGroupingIDs[j]
-		} else {
-			return keyA < keyB
-		}
-	})
-
 	// now we permute the sorted shufflable grouping IDs and build the ordered Groups
 	orderedGroups := GroupedSpecIndices{}
 	permutation := r.Perm(len(shufflableGroupingIDs))
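Note the design choice in SortableSpecs: it sorts a permutation of indexes rather than the Specs slice itself, so the backing slice keeps its original order for the later grouping passes. A minimal standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sort"
)

// sortableIndexes sorts a permutation of indexes into items while leaving
// the backing slice untouched -- the same trick SortableSpecs uses above.
type sortableIndexes struct {
	items   []string
	indexes []int
}

func (s sortableIndexes) Len() int      { return len(s.indexes) }
func (s sortableIndexes) Swap(i, j int) { s.indexes[i], s.indexes[j] = s.indexes[j], s.indexes[i] }
func (s sortableIndexes) Less(i, j int) bool {
	return s.items[s.indexes[i]] < s.items[s.indexes[j]]
}

func main() {
	items := []string{"cherry", "apple", "banana"}
	s := sortableIndexes{items: items, indexes: []int{0, 1, 2}}
	sort.Sort(s)
	fmt.Println(s.indexes) // [1 2 0]: walk indexes for sorted order
	fmt.Println(items)     // original slice order preserved
}
```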
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -151,6 +151,13 @@ func (suite *Suite) PushNode(node Node) error {
 		}
 	}
 
+	if node.MarkedContinueOnFailure {
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() {
+			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+		}
+	}
+
 	if node.NodeType == types.NodeTypeContainer {
 		// During PhaseBuildTopLevel we only track the top level containers without entering them
 		// We only enter the top level container nodes during PhaseBuildTree
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -298,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N
 	}
 }
 
+func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
+	return GinkgoError{
+		Heading:      "ContinueOnFailure not decorating an outermost Ordered Container",
+		Message:      "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
+		CodeLocation: cl,
+		DocLink:      "ordered-containers",
+	}
+}
+
 /* DeferCleanup errors */
 func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
 	return GinkgoError{
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -604,6 +604,9 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{
 func (ss SpecState) String() string {
 	return ssEnumSupport.String(uint(ss))
 }
+func (ss SpecState) GomegaString() string {
+	return ssEnumSupport.String(uint(ss))
+}
 func (ss *SpecState) UnmarshalJSON(b []byte) error {
 	out, err := ssEnumSupport.UnmarshJSON(b)
 	*ss = SpecState(out)
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
 package types
 
-const VERSION = "2.6.1"
+const VERSION = "2.7.0"
diff --git a/vendor/modules.txt b/vendor/modules.txt
@@ -374,7 +374,7 @@ github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.2
 ## explicit; go 1.12
 github.com/modern-go/reflect2
-# github.com/onsi/ginkgo/v2 v2.6.1
+# github.com/onsi/ginkgo/v2 v2.7.0
 ## explicit; go 1.18
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config