Move vegeta steadyUpPacer to pkg and add combinedPacer (#547)

* move steadyUpPacer to pkg and add combinedPacer

* fix codereview issues

* fix codereview issues

* fix codereview issues

* run dep-codegen.sh

* function rename, return error for constructor

* fix code review issues

* remove default values in constructor

* add more unit tests
Chi Zhang 2019-08-01 15:39:44 -07:00 committed by Knative Prow Robot
parent 717a85540f
commit 2b848f7196
25 changed files with 5040 additions and 0 deletions

Gopkg.lock generated

@@ -289,6 +289,14 @@
revision = "7c29201646fa3de8506f701213473dd407f19646"
version = "v0.3.7"
[[projects]]
branch = "master"
digest = "1:aa0434674a14402891f31f393b4dd53792bc67eee05158748d999096a8826ed3"
name = "github.com/influxdata/tdigest"
packages = ["."]
pruneopts = "NUT"
revision = "bf2b5ad3c0a925c44a0d2842c5d8182113cd248e"
[[projects]]
digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885"
name = "github.com/jmespath/go-jmespath"
@@ -311,6 +319,18 @@
pruneopts = "NUT"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
branch = "master"
digest = "1:927762c6729b4e72957ba3310e485ed09cf8451c5a637a52fd016a9fe09e7936"
name = "github.com/mailru/easyjson"
packages = [
"buffer",
"jlexer",
"jwriter",
]
pruneopts = "NUT"
revision = "b2ccc519800e761ac8000b95e5d57c80a897ff9e"
[[projects]]
digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
name = "github.com/markbates/inflect"
@@ -462,6 +482,14 @@
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "master"
digest = "1:25bec9bd4bd740f94fcbd977bff20ca1b43a658645577ca8a705e639aba1eea4"
name = "github.com/tsenart/vegeta"
packages = ["lib"]
pruneopts = "NUT"
revision = "b5f4fca92137581c0f118ba6b106ba0cb3a2c84e"
[[projects]]
digest = "1:0e3fd52087079d1289983e4fef32268ca965973f5370b69204e2934185527baa"
name = "go.opencensus.io"
@@ -1157,6 +1185,7 @@
"github.com/pkg/errors",
"github.com/rogpeppe/go-internal/semver",
"github.com/spf13/pflag",
"github.com/tsenart/vegeta/lib",
"go.opencensus.io/plugin/ochttp",
"go.opencensus.io/plugin/ochttp/propagation/b3",
"go.opencensus.io/stats",


@@ -0,0 +1,118 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pacers
import (
"errors"
"fmt"
"strings"
"time"
vegeta "github.com/tsenart/vegeta/lib"
)
// combinedPacer is a Pacer that combines multiple Pacers and runs them sequentially during an attack.
type combinedPacer struct {
// pacers is the list of pacers that will be used sequentially during the attack.
// MUST contain more than one pacer.
pacers []vegeta.Pacer
// durations is the list of durations for the given pacers.
// MUST have the same length as pacers, and each duration MUST be longer than 1 second.
durations []time.Duration
// totalDuration is the sum of the given durations.
totalDuration uint64
// stepDurations is the cumulative duration at the end of each step, calculated from the given durations.
stepDurations []uint64
curPacerIndex uint
prevElapsedHits uint64
prevElapsedTime uint64
}
// NewCombined returns a new combinedPacer with the given config.
func NewCombined(pacers []vegeta.Pacer, durations []time.Duration) (vegeta.Pacer, error) {
if len(pacers) == 0 || len(durations) == 0 || len(pacers) != len(durations) || len(pacers) == 1 {
return nil, errors.New("configuration for this CombinedPacer is invalid")
}
var totalDuration uint64
var stepDurations = make([]uint64, len(pacers))
for i, duration := range durations {
if duration < 1*time.Second {
return nil, errors.New("duration for each pacer must be longer than 1 second")
}
totalDuration += uint64(duration)
if i == 0 {
stepDurations[0] = uint64(duration)
} else {
stepDurations[i] = stepDurations[i-1] + uint64(duration)
}
}
pacer := &combinedPacer{
pacers: pacers,
durations: durations,
totalDuration: totalDuration,
stepDurations: stepDurations,
}
return pacer, nil
}
// combinedPacer satisfies the Pacer interface.
var _ vegeta.Pacer = &combinedPacer{}
// String returns a pretty-printed description of the combinedPacer's behaviour.
func (cp *combinedPacer) String() string {
var sb strings.Builder
for i := range cp.pacers {
sb.WriteString(fmt.Sprintf("Pacer: %s, Duration: %s\n", cp.pacers[i], cp.durations[i]))
}
return sb.String()
}
func (cp *combinedPacer) Pace(elapsedTime time.Duration, elapsedHits uint64) (time.Duration, bool) {
pacerTimeOffset := uint64(elapsedTime) % cp.totalDuration
pacerIndex := cp.pacerIndex(pacerTimeOffset)
// If it needs to switch to the next pacer, update prevElapsedTime, prevElapsedHits and curPacerIndex.
if pacerIndex != cp.curPacerIndex {
cp.prevElapsedTime = uint64(elapsedTime)
cp.prevElapsedHits = elapsedHits
cp.curPacerIndex = pacerIndex
}
// Use the adjusted elapsedTime and elapsedHits to get the time to wait for the next hit.
curPacer := cp.pacers[cp.curPacerIndex]
curElapsedTime := time.Duration(uint64(elapsedTime) - cp.prevElapsedTime)
curElapsedHits := elapsedHits - cp.prevElapsedHits
return curPacer.Pace(curElapsedTime, curElapsedHits)
}
// pacerIndex returns the index of the pacer that pacerTimeOffset falls into.
func (cp *combinedPacer) pacerIndex(pacerTimeOffset uint64) uint {
i, j := 0, len(cp.stepDurations)
for i < j {
m := i + (j-i)/2
if pacerTimeOffset >= cp.stepDurations[m] {
i = m + 1
} else {
j = m
}
}
return uint(i)
}
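As a quick usage sketch (illustrative only, written as if inside the same pacers package; examplePacer is a hypothetical helper, not part of this change): build a pacer that serves 1 rps for 5 seconds, then 5 rps for 10 seconds, looping back to the start for as long as the attack runs.

package pacers

import (
    "time"

    vegeta "github.com/tsenart/vegeta/lib"
)

// examplePacer wires two constant rates into a combinedPacer.
func examplePacer() (vegeta.Pacer, error) {
    return NewCombined(
        []vegeta.Pacer{
            vegeta.Rate{Freq: 1, Per: time.Second},
            vegeta.Rate{Freq: 5, Per: time.Second},
        },
        []time.Duration{5 * time.Second, 10 * time.Second},
    )
}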


@@ -0,0 +1,123 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pacers
import (
"testing"
"time"
vegeta "github.com/tsenart/vegeta/lib"
)
func TestCombinedPacer(t *testing.T) {
pacer1 := vegeta.Rate{Freq: 1, Per: time.Second}
pacer2 := vegeta.Rate{Freq: 5, Per: time.Second}
pacer3 := vegeta.Rate{Freq: 10, Per: time.Second}
pacers := []vegeta.Pacer{pacer1, pacer2, pacer3}
durations := []time.Duration{5 * time.Second, 5 * time.Second, 10 * time.Second}
pacer, _ := NewCombined(pacers, durations)
for _, tt := range []struct {
name string
elapsedTime time.Duration
elapsedHits uint64
expectedNextHit time.Duration
expectedStop bool
}{{
name: "test the first hit",
elapsedTime: 0 * time.Second,
elapsedHits: 0,
expectedNextHit: 1 * time.Second,
}, {
name: "test the switch pacer hit",
elapsedTime: 5 * time.Second,
elapsedHits: 5,
expectedNextHit: 200 * time.Millisecond,
}, {
name: "test the pacer middle hit",
elapsedTime: 7 * time.Second,
elapsedHits: 15,
expectedNextHit: 200 * time.Millisecond,
}, {
name: "test the last hit",
elapsedTime: 20 * time.Second,
elapsedHits: 130,
expectedNextHit: 1 * time.Second,
}, {
name: "test the loop back pacer hit",
elapsedTime: 22 * time.Second,
elapsedHits: 132,
expectedNextHit: 1 * time.Second,
}, {
name: "test the catch up hit",
elapsedTime: 24 * time.Second,
elapsedHits: 130,
expectedNextHit: 0,
}} {
t.Run(tt.name, func(t *testing.T) {
nextHit, _ := pacer.Pace(tt.elapsedTime, tt.elapsedHits)
if nextHit != tt.expectedNextHit {
t.Errorf(
"expected next hit for elapseTime %v and elapsedHits %d is %v, got %v",
tt.elapsedTime, tt.elapsedHits,
tt.expectedNextHit, nextHit,
)
}
})
}
}
func TestInvalidCombinedPacer(t *testing.T) {
for _, tt := range []struct {
name string
pacers []vegeta.Pacer
durations []time.Duration
}{{
name: "pacers must not be empty",
pacers: make([]vegeta.Pacer, 0),
durations: []time.Duration{10 * time.Second},
}, {
name: "durations must not be empty",
pacers: []vegeta.Pacer{vegeta.Rate{Freq: 10, Per: 10 * time.Second}},
durations: make([]time.Duration, 0),
}, {
name: "pacers and durations must have the same length",
pacers: []vegeta.Pacer{
vegeta.Rate{Freq: 10, Per: 10 * time.Second},
vegeta.Rate{Freq: 10, Per: 5 * time.Second},
},
durations: []time.Duration{10 * time.Second},
}, {
name: "pacers length must be more than 1",
pacers: []vegeta.Pacer{vegeta.Rate{Freq: 10, Per: 10 * time.Second}},
durations: []time.Duration{10 * time.Second},
}, {
name: "duration for each pacer must be longer than 1 second",
pacers: []vegeta.Pacer{
vegeta.Rate{Freq: 10, Per: 10 * time.Second},
vegeta.Rate{Freq: 10, Per: 5 * time.Second},
},
durations: []time.Duration{500 * time.Millisecond, 10 * time.Second},
}} {
t.Run(tt.name, func(t *testing.T) {
_, err := NewCombined(tt.pacers, tt.durations)
if err == nil {
t.Errorf("the provided configuration should be invalid: %v, %v", tt.pacers, tt.durations)
}
})
}
}
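For reference, the expected values above can be checked by hand: the total cycle is 5s + 5s + 10s = 20s. At elapsedTime = 5s the offset crosses the first step boundary, so the 5 rps pacer takes over and the next hit is 1s/5 = 200ms away. At 20s the offset wraps around (20 mod 20 = 0), so the loop returns to the 1 rps pacer and the gap is 1s again. At 24s only 130 hits have been sent while the current pacer expects more, so Pace returns 0 to catch up immediately.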


@@ -0,0 +1,137 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pacers
import (
"errors"
"fmt"
"math"
"time"
vegeta "github.com/tsenart/vegeta/lib"
)
// steadyUpPacer is a Pacer that describes an attack request rate that increases at the beginning and then becomes steady.
// Max | ,----------------
// | /
// | /
// | /
// | /
// Min -+------------------------------> t
// |<-Up->|
type steadyUpPacer struct {
// upDuration is the duration over which the attack request rate increases from min to max.
// MUST be larger than 0.
upDuration time.Duration
// min is the attack request rate at the beginning.
// MUST be larger than 0.
min vegeta.Rate
// max is the maximum and final steady attack request rate.
// MUST be larger than min.
max vegeta.Rate
slope float64
minHitsPerNs float64
maxHitsPerNs float64
}
// NewSteadyUp returns a new steadyUpPacer with the given config.
func NewSteadyUp(min vegeta.Rate, max vegeta.Rate, upDuration time.Duration) (vegeta.Pacer, error) {
if upDuration <= 0 || min.Freq <= 0 || min.Per <= 0 || max.Freq <= 0 || max.Per <= 0 {
return nil, errors.New("configuration for this SteadyUpPacer is invalid")
}
minHitsPerNs := hitsPerNs(min)
maxHitsPerNs := hitsPerNs(max)
if minHitsPerNs >= maxHitsPerNs {
return nil, errors.New("min rate must be smaller than max rate for SteadyUpPacer")
}
pacer := &steadyUpPacer{
min: min,
max: max,
upDuration: upDuration,
slope: (maxHitsPerNs - minHitsPerNs) / float64(upDuration),
minHitsPerNs: minHitsPerNs,
maxHitsPerNs: maxHitsPerNs,
}
return pacer, nil
}
// steadyUpPacer satisfies the Pacer interface.
var _ vegeta.Pacer = &steadyUpPacer{}
// String returns a pretty-printed description of the steadyUpPacer's behaviour.
func (sup *steadyUpPacer) String() string {
return fmt.Sprintf("Up{%s + %s / %s}, then Steady{%s}", sup.min, sup.max, sup.upDuration, sup.max)
}
// Pace determines the length of time to sleep until the next hit is sent.
func (sup *steadyUpPacer) Pace(elapsedTime time.Duration, elapsedHits uint64) (time.Duration, bool) {
expectedHits := sup.hits(elapsedTime)
if elapsedHits < uint64(expectedHits) {
// Running behind, send next hit immediately.
return 0, false
}
// Re-arranging our hits equation to provide a duration given the number of
// requests sent is non-trivial, so we must solve for the duration numerically.
nsPerHit := 1 / sup.hitsPerNs(elapsedTime)
hitsToWait := float64(elapsedHits+1) - expectedHits
nextHitIn := time.Duration(nsPerHit * hitsToWait)
// If we can't converge to an error of <1e-3 within 10 iterations, bail.
// This rarely even loops for any large Period if hitsToWait is small.
for i := 0; i < 10; i++ {
hitsAtGuess := sup.hits(elapsedTime + nextHitIn)
err := float64(elapsedHits+1) - hitsAtGuess
if math.Abs(err) < 1e-3 {
return nextHitIn, false
}
nextHitIn = time.Duration(float64(nextHitIn) / (hitsAtGuess - float64(elapsedHits)))
}
return nextHitIn, false
}
// hits returns the number of expected hits for this pacer during the given time.
func (sup *steadyUpPacer) hits(t time.Duration) float64 {
// If t is within the upDuration, calculate the hits as the area of a trapezoid.
if t <= sup.upDuration {
curHitsPerNs := sup.hitsPerNs(t)
return (curHitsPerNs + sup.minHitsPerNs) / 2.0 * float64(t)
}
// If t is larger than the upDuration, calculate the hits as a trapezoid + a rectangle.
upHits := (sup.maxHitsPerNs + sup.minHitsPerNs) / 2.0 * float64(sup.upDuration)
steadyHits := sup.maxHitsPerNs * float64(t-sup.upDuration)
return upHits + steadyHits
}
// hitsPerNs returns the attack rate for this pacer at a given time.
func (sup *steadyUpPacer) hitsPerNs(t time.Duration) float64 {
if t <= sup.upDuration {
return sup.minHitsPerNs + float64(t)*sup.slope
}
return sup.maxHitsPerNs
}
// hitsPerNs returns the attack rate this ConstantPacer represents, in
// fractional hits per nanosecond.
func hitsPerNs(cp vegeta.ConstantPacer) float64 {
return float64(cp.Freq) / float64(cp.Per)
}
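A worked check of the ramp math, using the values from the test below (min = 1 rps, max = 5 rps, upDuration = 10s): minHitsPerNs = 1e-9, maxHitsPerNs = 5e-9, and slope = (5e-9 - 1e-9) / 1e10 = 4e-19. During the ramp, hits(t) = (minHitsPerNs + hitsPerNs(t)) / 2 * t = 1e-9*t + 2e-19*t^2. Setting hits(t) = 1 and solving the quadratic gives t ≈ 8.54e8 ns, i.e. roughly 0.85s until the first hit; Pace approaches this root numerically and stops once the error drops below 1e-3, which is why the test expects 853,658,536 ns rather than the exact root.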


@@ -0,0 +1,105 @@
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pacers
import (
"testing"
"time"
vegeta "github.com/tsenart/vegeta/lib"
)
func TestSteadyUpPacer(t *testing.T) {
minRate := vegeta.Rate{Freq: 1, Per: time.Second}
maxRate := vegeta.Rate{Freq: 5, Per: time.Second}
pacer, _ := NewSteadyUp(minRate, maxRate, 10*time.Second)
for _, tt := range []struct {
name string
elapsedTime time.Duration
elapsedHits uint64
expectedNextHit time.Duration
expectedStop bool
}{{
name: "test the first hit",
elapsedTime: 0 * time.Second,
elapsedHits: 0,
expectedNextHit: 853658536 * time.Nanosecond,
}, {
name: "test the up hit",
elapsedTime: 3 * time.Second,
elapsedHits: 5,
expectedNextHit: 520407468 * time.Nanosecond,
}, {
name: "test the catch up hit",
elapsedTime: 4 * time.Second,
elapsedHits: 2,
expectedNextHit: 0,
}, {
name: "test the steady hit",
elapsedTime: 7052432251 * time.Nanosecond,
elapsedHits: 17,
expectedNextHit: 258228895 * time.Nanosecond,
}} {
t.Run(tt.name, func(t *testing.T) {
nextHit, _ := pacer.Pace(tt.elapsedTime, tt.elapsedHits)
if nextHit != tt.expectedNextHit {
t.Errorf(
"expected next hit for elapsedTime %v and elapsedHits %d is %v, got %v",
tt.elapsedTime, tt.elapsedHits,
tt.expectedNextHit, nextHit,
)
}
})
}
}
func TestInvalidSteadyUpPacer(t *testing.T) {
for _, tt := range []struct {
name string
min vegeta.Rate
max vegeta.Rate
upDuration time.Duration
}{{
name: "up duration must be larger than 0",
min: vegeta.Rate{Freq: 10, Per: time.Second},
max: vegeta.Rate{Freq: 5, Per: time.Second},
upDuration: 0,
}, {
name: "min rate must be larger than 0",
min: vegeta.Rate{Freq: 0, Per: time.Second},
max: vegeta.Rate{Freq: 5, Per: time.Second},
upDuration: 10 * time.Second,
}, {
name: "max rate must be larger than 0",
min: vegeta.Rate{Freq: 10, Per: time.Second},
max: vegeta.Rate{Freq: 0, Per: time.Second},
upDuration: 10 * time.Second,
}, {
name: "min rate must be smaller than max rate",
min: vegeta.Rate{Freq: 10, Per: time.Second},
max: vegeta.Rate{Freq: 6, Per: time.Second},
upDuration: 10 * time.Second,
}} {
t.Run(tt.name, func(t *testing.T) {
_, err := NewSteadyUp(tt.min, tt.max, tt.upDuration)
if err == nil {
t.Errorf("the provided configuration should be invalid: %v, %v, %v", tt.min, tt.max, tt.upDuration)
}
})
}
}

vendor/github.com/influxdata/tdigest/LICENSE generated vendored

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 InfluxData Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/influxdata/tdigest/centroid.go generated vendored

@@ -0,0 +1,59 @@
package tdigest
import (
"fmt"
"sort"
)
// ErrWeightLessThanZero is used when the weight is not able to be processed.
const ErrWeightLessThanZero = Error("centroid weight cannot be less than zero")
// Error is a domain error encountered while processing tdigests
type Error string
func (e Error) Error() string {
return string(e)
}
// Centroid is the average position of all points in a shape
type Centroid struct {
Mean float64
Weight float64
}
func (c *Centroid) String() string {
return fmt.Sprintf("{mean: %f weight: %f}", c.Mean, c.Weight)
}
// Add averages the two centroids together and updates this centroid
func (c *Centroid) Add(r Centroid) error {
if r.Weight < 0 {
return ErrWeightLessThanZero
}
if c.Weight != 0 {
c.Weight += r.Weight
c.Mean += r.Weight * (r.Mean - c.Mean) / c.Weight
} else {
c.Weight = r.Weight
c.Mean = r.Mean
}
return nil
}
// CentroidList is sorted by the Mean of the centroid, ascending.
type CentroidList []Centroid
func (l *CentroidList) Clear() {
*l = (*l)[0:0]
}
func (l CentroidList) Len() int { return len(l) }
func (l CentroidList) Less(i, j int) bool { return l[i].Mean < l[j].Mean }
func (l CentroidList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
// NewCentroidList creates a priority queue for the centroids
func NewCentroidList(centroids []Centroid) CentroidList {
l := CentroidList(centroids)
sort.Sort(l)
return l
}
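A worked example of the weighted-mean update in Add: merging r = {Mean: 3, Weight: 1} into c = {Mean: 1, Weight: 1} gives c.Weight = 2 and c.Mean = 1 + 1*(3-1)/2 = 2, the plain average. If c instead starts with Weight 3, the result is c.Weight = 4 and c.Mean = 1 + 1*(3-1)/4 = 1.5: the heavier centroid moves only a quarter of the way toward r.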

vendor/github.com/influxdata/tdigest/tdigest.go generated vendored

@@ -0,0 +1,229 @@
package tdigest
import (
"math"
"sort"
)
type TDigest struct {
Compression float64
maxProcessed int
maxUnprocessed int
processed CentroidList
unprocessed CentroidList
cumulative []float64
processedWeight float64
unprocessedWeight float64
min float64
max float64
}
func New() *TDigest {
return NewWithCompression(1000)
}
func NewWithCompression(c float64) *TDigest {
t := &TDigest{
Compression: c,
}
t.maxProcessed = processedSize(0, t.Compression)
t.maxUnprocessed = unprocessedSize(0, t.Compression)
t.processed = make([]Centroid, 0, t.maxProcessed)
t.unprocessed = make([]Centroid, 0, t.maxUnprocessed+1)
t.min = math.MaxFloat64
t.max = -math.MaxFloat64
return t
}
func (t *TDigest) Add(x, w float64) {
if math.IsNaN(x) {
return
}
t.AddCentroid(Centroid{Mean: x, Weight: w})
}
func (t *TDigest) AddCentroidList(c CentroidList) {
l := c.Len()
for i := 0; i < l; i++ {
diff := l - i
room := t.maxUnprocessed - t.unprocessed.Len()
mid := i + diff
if room < diff {
mid = i + room
}
for i < mid {
t.AddCentroid(c[i])
i++
}
}
}
func (t *TDigest) AddCentroid(c Centroid) {
t.unprocessed = append(t.unprocessed, c)
t.unprocessedWeight += c.Weight
if t.processed.Len() > t.maxProcessed ||
t.unprocessed.Len() > t.maxUnprocessed {
t.process()
}
}
func (t *TDigest) process() {
if t.unprocessed.Len() > 0 ||
t.processed.Len() > t.maxProcessed {
// Append all processed centroids to the unprocessed list and sort
t.unprocessed = append(t.unprocessed, t.processed...)
sort.Sort(&t.unprocessed)
// Reset processed list with first centroid
t.processed.Clear()
t.processed = append(t.processed, t.unprocessed[0])
t.processedWeight += t.unprocessedWeight
t.unprocessedWeight = 0
soFar := t.unprocessed[0].Weight
limit := t.processedWeight * t.integratedQ(1.0)
for _, centroid := range t.unprocessed[1:] {
projected := soFar + centroid.Weight
if projected <= limit {
soFar = projected
(&t.processed[t.processed.Len()-1]).Add(centroid)
} else {
k1 := t.integratedLocation(soFar / t.processedWeight)
limit = t.processedWeight * t.integratedQ(k1+1.0)
soFar += centroid.Weight
t.processed = append(t.processed, centroid)
}
}
t.min = math.Min(t.min, t.processed[0].Mean)
t.max = math.Max(t.max, t.processed[t.processed.Len()-1].Mean)
t.updateCumulative()
t.unprocessed.Clear()
}
}
func (t *TDigest) updateCumulative() {
t.cumulative = make([]float64, t.processed.Len()+1)
prev := 0.0
for i, centroid := range t.processed {
cur := centroid.Weight
t.cumulative[i] = prev + cur/2.0
prev = prev + cur
}
t.cumulative[t.processed.Len()] = prev
}
func (t *TDigest) Quantile(q float64) float64 {
t.process()
if q < 0 || q > 1 || t.processed.Len() == 0 {
return math.NaN()
}
if t.processed.Len() == 1 {
return t.processed[0].Mean
}
index := q * t.processedWeight
if index <= t.processed[0].Weight/2.0 {
return t.min + 2.0*index/t.processed[0].Weight*(t.processed[0].Mean-t.min)
}
lower := sort.Search(len(t.cumulative), func(i int) bool {
return t.cumulative[i] >= index
})
if lower+1 != len(t.cumulative) {
z1 := index - t.cumulative[lower-1]
z2 := t.cumulative[lower] - index
return weightedAverage(t.processed[lower-1].Mean, z2, t.processed[lower].Mean, z1)
}
z1 := index - t.processedWeight - t.processed[lower-1].Weight/2.0
z2 := (t.processed[lower-1].Weight / 2.0) - z1
return weightedAverage(t.processed[t.processed.Len()-1].Mean, z1, t.max, z2)
}
func (t *TDigest) CDF(x float64) float64 {
t.process()
switch t.processed.Len() {
case 0:
return 0.0
case 1:
width := t.max - t.min
if x <= t.min {
return 0.0
}
if x >= t.max {
return 1.0
}
if (x - t.min) <= width {
// min and max are too close together to do any viable interpolation
return 0.5
}
return (x - t.min) / width
}
if x <= t.min {
return 0.0
}
if x >= t.max {
return 1.0
}
m0 := t.processed[0].Mean
// Left Tail
if x <= m0 {
if m0-t.min > 0 {
return (x - t.min) / (m0 - t.min) * t.processed[0].Weight / t.processedWeight / 2.0
}
return 0.0
}
// Right Tail
mn := t.processed[t.processed.Len()-1].Mean
if x >= mn {
if t.max-mn > 0.0 {
return 1.0 - (t.max-x)/(t.max-mn)*t.processed[t.processed.Len()-1].Weight/t.processedWeight/2.0
}
return 1.0
}
upper := sort.Search(t.processed.Len(), func(i int) bool {
return t.processed[i].Mean > x
})
z1 := x - t.processed[upper-1].Mean
z2 := t.processed[upper].Mean - x
return weightedAverage(t.cumulative[upper-1], z2, t.cumulative[upper], z1) / t.processedWeight
}
func (t *TDigest) integratedQ(k float64) float64 {
return (math.Sin(math.Min(k, t.Compression)*math.Pi/t.Compression-math.Pi/2.0) + 1.0) / 2.0
}
func (t *TDigest) integratedLocation(q float64) float64 {
return t.Compression * (math.Asin(2.0*q-1.0) + math.Pi/2.0) / math.Pi
}
func weightedAverage(x1, w1, x2, w2 float64) float64 {
if x1 <= x2 {
return weightedAverageSorted(x1, w1, x2, w2)
}
return weightedAverageSorted(x2, w2, x1, w1)
}
func weightedAverageSorted(x1, w1, x2, w2 float64) float64 {
x := (x1*w1 + x2*w2) / (w1 + w2)
return math.Max(x1, math.Min(x, x2))
}
func processedSize(size int, compression float64) int {
if size == 0 {
return int(2 * math.Ceil(compression))
}
return size
}
func unprocessedSize(size int, compression float64) int {
if size == 0 {
return int(8 * math.Ceil(compression))
}
return size
}
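A minimal usage sketch of this vendored t-digest API (illustrative only; the import path matches the vendored package above):

package main

import (
    "fmt"

    "github.com/influxdata/tdigest"
)

func main() {
    td := tdigest.New() // default compression of 1000
    for i := 1; i <= 1000; i++ {
        td.Add(float64(i), 1) // value, weight
    }
    // Quantile compresses any unprocessed centroids, then interpolates.
    fmt.Println(td.Quantile(0.5), td.Quantile(0.99))
}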

vendor/github.com/mailru/easyjson/LICENSE generated vendored

@@ -0,0 +1,7 @@
Copyright (c) 2016 Mail.Ru Group
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/mailru/easyjson/buffer/pool.go generated vendored

@@ -0,0 +1,270 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer
import (
"io"
"sync"
)
// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
StartSize int // Minimum chunk size that is allocated.
PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small results in overhead.
MaxSize int // Maximum chunk size that will be allocated.
}
var config = PoolConfig{
StartSize: 128,
PooledSize: 512,
MaxSize: 32768,
}
// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}
func initBuffers() {
for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
buffers[l] = new(sync.Pool)
}
}
func init() {
initBuffers()
}
// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
config = cfg
initBuffers()
}
// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
size := cap(buf)
if size < config.PooledSize {
return
}
if c := buffers[size]; c != nil {
c.Put(buf[:0])
}
}
// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
if size < config.PooledSize {
return make([]byte, 0, size)
}
if c := buffers[size]; c != nil {
v := c.Get()
if v != nil {
return v.([]byte)
}
}
return make([]byte, 0, size)
}
// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {
// Buf is the current chunk that can be used for serialization.
Buf []byte
toPool []byte
bufs [][]byte
}
// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
if cap(b.Buf)-len(b.Buf) >= s {
return
}
l := len(b.Buf)
if l > 0 {
if cap(b.toPool) != cap(b.Buf) {
// Chunk was reallocated, toPool can be pooled.
putBuf(b.toPool)
}
if cap(b.bufs) == 0 {
b.bufs = make([][]byte, 0, 8)
}
b.bufs = append(b.bufs, b.Buf)
l = cap(b.toPool) * 2
} else {
l = config.StartSize
}
if l > config.MaxSize {
l = config.MaxSize
}
b.Buf = getBuf(l)
b.toPool = b.Buf
}
// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
b.Buf = append(b.Buf, data)
}
// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
for len(data) > 0 {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
for len(data) > 0 {
if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
b.EnsureSpace(1)
}
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
size := len(b.Buf)
for _, buf := range b.bufs {
size += len(buf)
}
return size
}
// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
var n int
for _, buf := range b.bufs {
if err == nil {
n, err = w.Write(buf)
written += n
}
putBuf(buf)
}
if err == nil {
n, err = w.Write(b.Buf)
written += n
}
putBuf(b.toPool)
b.bufs = nil
b.Buf = nil
b.toPool = nil
return
}
// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk. You can optionally provide one byte
// slice as argument that it will try to reuse.
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
if len(b.bufs) == 0 {
ret := b.Buf
b.toPool = nil
b.Buf = nil
return ret
}
var ret []byte
size := b.Size()
// If we got a buffer as argument and it is big enough, reuse it.
if len(reuse) == 1 && cap(reuse[0]) >= size {
ret = reuse[0][:0]
} else {
ret = make([]byte, 0, size)
}
for _, buf := range b.bufs {
ret = append(ret, buf...)
putBuf(buf)
}
ret = append(ret, b.Buf...)
putBuf(b.toPool)
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}
type readCloser struct {
offset int
bufs [][]byte
}
func (r *readCloser) Read(p []byte) (n int, err error) {
for _, buf := range r.bufs {
// Copy as much as we can.
x := copy(p[n:], buf[r.offset:])
n += x // Increment how much we filled.
// Did we empty the whole buffer?
if r.offset+x == len(buf) {
// On to the next buffer.
r.offset = 0
r.bufs = r.bufs[1:]
// We can release this buffer.
putBuf(buf)
} else {
r.offset += x
}
if n == len(p) {
break
}
}
// No buffers left or nothing read?
if len(r.bufs) == 0 {
err = io.EOF
}
return
}
func (r *readCloser) Close() error {
// Release all remaining buffers.
for _, buf := range r.bufs {
putBuf(buf)
}
// In case Close gets called multiple times.
r.bufs = nil
return nil
}
// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
func (b *Buffer) ReadCloser() io.ReadCloser {
ret := &readCloser{0, append(b.bufs, b.Buf)}
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}
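A minimal usage sketch for the chunked buffer (illustrative only): append data, then collapse it into a single slice.

package main

import (
    "fmt"

    "github.com/mailru/easyjson/buffer"
)

func main() {
    var b buffer.Buffer
    b.AppendString("hello, ")
    b.AppendBytes([]byte("world"))
    fmt.Println(b.Size()) // total bytes across all chunks
    out := b.BuildBytes() // copies only if more than one chunk exists
    fmt.Println(string(out))
}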

vendor/github.com/mailru/easyjson/jlexer/bytestostr.go generated vendored

@@ -0,0 +1,24 @@
// This file will only be included in the build if neither the
// easyjson_nounsafe nor the appengine build tag is set. See README notes
// for more details.
//+build !easyjson_nounsafe
//+build !appengine
package jlexer
import (
"reflect"
"unsafe"
)
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
func bytesToStr(data []byte) string {
h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
return *(*string)(unsafe.Pointer(&shdr))
}


@@ -0,0 +1,13 @@
// This file is included in the build if any of the build tags below
// are defined. Refer to README notes for more details.
//+build easyjson_nounsafe appengine
package jlexer
// bytesToStr creates a string normally from []byte
//
// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
func bytesToStr(data []byte) string {
return string(data)
}

vendor/github.com/mailru/easyjson/jlexer/error.go generated vendored

@@ -0,0 +1,15 @@
package jlexer
import "fmt"
// LexerError implements the error interface and represents all possible errors that can be
// generated during parsing the JSON data.
type LexerError struct {
Reason string
Offset int
Data string
}
func (l *LexerError) Error() string {
return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
}

vendor/github.com/mailru/easyjson/jlexer/lexer.go generated vendored

File diff suppressed because it is too large

vendor/github.com/mailru/easyjson/jwriter/writer.go generated vendored

@@ -0,0 +1,390 @@
// Package jwriter contains a JSON writer.
package jwriter
import (
"io"
"strconv"
"unicode/utf8"
"github.com/mailru/easyjson/buffer"
)
// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
// Flags field in Writer is used to set and pass them around.
type Flags int
const (
NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
)
// Writer is a JSON writer.
type Writer struct {
Flags Flags
Error error
Buffer buffer.Buffer
NoEscapeHTML bool
}
// Size returns the size of the data that was written out.
func (w *Writer) Size() int {
return w.Buffer.Size()
}
// DumpTo outputs the data to given io.Writer, resetting the buffer.
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
return w.Buffer.DumpTo(out)
}
// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
// as argument that it will try to reuse.
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.BuildBytes(reuse...), nil
}
// ReadCloser returns an io.ReadCloser that can be used to read the data.
// ReadCloser also resets the buffer.
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.ReadCloser(), nil
}
// RawByte appends raw binary data to the buffer.
func (w *Writer) RawByte(c byte) {
w.Buffer.AppendByte(c)
}
// RawString appends a raw string to the buffer.
func (w *Writer) RawString(s string) {
w.Buffer.AppendString(s)
}
// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
// calling with results of MarshalJSON-like functions.
func (w *Writer) Raw(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.Buffer.AppendBytes(data)
default:
w.RawString("null")
}
}
// RawText encloses raw binary data in quotes and appends it to the buffer.
// Useful for calling with results of MarshalText-like functions.
func (w *Writer) RawText(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.String(string(data))
default:
w.RawString("null")
}
}
// Base64Bytes appends data to the buffer after base64 encoding it
func (w *Writer) Base64Bytes(data []byte) {
if data == nil {
w.Buffer.AppendString("null")
return
}
w.Buffer.AppendByte('"')
w.base64(data)
w.Buffer.AppendByte('"')
}
func (w *Writer) Uint8(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint16(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint32(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint64(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
}
func (w *Writer) Int8(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int16(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int32(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int64(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
}
func (w *Writer) Uint8Str(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint16Str(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint32Str(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintStr(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint64Str(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintptrStr(n uintptr) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int8Str(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int16Str(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int32Str(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) IntStr(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int64Str(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float32(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
}
func (w *Writer) Float32Str(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float64(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
}
func (w *Writer) Float64Str(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Bool(v bool) {
w.Buffer.EnsureSpace(5)
if v {
w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
} else {
w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
}
}
const chars = "0123456789abcdef"
func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
// Note: might make sense to use a table if there are more chars to escape. With 4 chars
// it benchmarks the same.
if escapeHTML {
return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
} else {
return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
}
}
func (w *Writer) String(s string) {
w.Buffer.AppendByte('"')
// Portions of the string that contain no escapes are appended as
// byte slices.
p := 0 // last non-escape symbol
for i := 0; i < len(s); {
c := s[i]
if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
// single-width character, no escaping is required
i++
continue
} else if c < utf8.RuneSelf {
// single-width character, needs to be escaped
w.Buffer.AppendString(s[p:i])
switch c {
case '\t':
w.Buffer.AppendString(`\t`)
case '\r':
w.Buffer.AppendString(`\r`)
case '\n':
w.Buffer.AppendString(`\n`)
case '\\':
w.Buffer.AppendString(`\\`)
case '"':
w.Buffer.AppendString(`\"`)
default:
w.Buffer.AppendString(`\u00`)
w.Buffer.AppendByte(chars[c>>4])
w.Buffer.AppendByte(chars[c&0xf])
}
i++
p = i
continue
}
// broken utf
runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
if runeValue == utf8.RuneError && runeWidth == 1 {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\ufffd`)
i++
p = i
continue
}
// jsonp stuff - tab separator and line separator
if runeValue == '\u2028' || runeValue == '\u2029' {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\u202`)
w.Buffer.AppendByte(chars[runeValue&0xf])
i += runeWidth
p = i
continue
}
i += runeWidth
}
w.Buffer.AppendString(s[p:])
w.Buffer.AppendByte('"')
}
const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
const padChar = '='
func (w *Writer) base64(in []byte) {
if len(in) == 0 {
return
}
w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
si := 0
n := (len(in) / 3) * 3
for si < n {
// Convert 3x 8bit source bytes into 4 bytes
val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
si += 3
}
remain := len(in) - si
if remain == 0 {
return
}
// Add the remaining small block
val := uint(in[si+0]) << 16
if remain == 2 {
val |= uint(in[si+1]) << 8
}
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
switch remain {
case 2:
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
case 1:
w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
}
}
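A minimal usage sketch for the writer (illustrative only): build a small JSON object by hand and extract the bytes.

package main

import (
    "fmt"

    "github.com/mailru/easyjson/jwriter"
)

func main() {
    var w jwriter.Writer
    w.RawByte('{')
    w.String("name") // JSON-escapes and quotes the value
    w.RawByte(':')
    w.String("knative")
    w.RawByte('}')
    data, err := w.BuildBytes()
    fmt.Println(string(data), err) // {"name":"knative"} <nil>
}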

vendor/github.com/tsenart/vegeta/LICENSE generated vendored

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013-2016 Tomás Senart
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/tsenart/vegeta/lib/attack.go generated vendored

@@ -0,0 +1,372 @@
package vegeta
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"net/http"
"net/url"
"sync"
"time"
"golang.org/x/net/http2"
)
// Attacker is an attack executor which wraps an http.Client
type Attacker struct {
dialer *net.Dialer
client http.Client
stopch chan struct{}
workers uint64
maxWorkers uint64
maxBody int64
redirects int
seqmu sync.Mutex
seq uint64
began time.Time
}
const (
// DefaultRedirects is the default number of times an Attacker follows
// redirects.
DefaultRedirects = 10
// DefaultTimeout is the default amount of time an Attacker waits for a request
// before it times out.
DefaultTimeout = 30 * time.Second
// DefaultConnections is the default amount of max open idle connections per
// target host.
DefaultConnections = 10000
// DefaultWorkers is the default initial number of workers used to carry an attack.
DefaultWorkers = 10
// DefaultMaxWorkers is the default maximum number of workers used to carry an attack.
DefaultMaxWorkers = math.MaxUint64
// DefaultMaxBody is the default max number of bytes to be read from response bodies.
// Defaults to no limit.
DefaultMaxBody = int64(-1)
// NoFollow is the value when redirects are not followed but marked successful
NoFollow = -1
)
var (
// DefaultLocalAddr is the default local IP address an Attacker uses.
DefaultLocalAddr = net.IPAddr{IP: net.IPv4zero}
// DefaultTLSConfig is the default tls.Config an Attacker uses.
DefaultTLSConfig = &tls.Config{InsecureSkipVerify: true}
)
// NewAttacker returns a new Attacker with default options which are overridden
// by the optionally provided opts.
func NewAttacker(opts ...func(*Attacker)) *Attacker {
a := &Attacker{
stopch: make(chan struct{}),
workers: DefaultWorkers,
maxWorkers: DefaultMaxWorkers,
maxBody: DefaultMaxBody,
began: time.Now(),
}
a.dialer = &net.Dialer{
LocalAddr: &net.TCPAddr{IP: DefaultLocalAddr.IP, Zone: DefaultLocalAddr.Zone},
KeepAlive: 30 * time.Second,
}
a.client = http.Client{
Timeout: DefaultTimeout,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: a.dialer.Dial,
TLSClientConfig: DefaultTLSConfig,
MaxIdleConnsPerHost: DefaultConnections,
},
}
for _, opt := range opts {
opt(a)
}
return a
}
// Workers returns a functional option which sets the initial number of workers
// an Attacker uses to hit its targets. More workers may be spawned dynamically
// to sustain the requested rate in the face of slow responses and errors.
func Workers(n uint64) func(*Attacker) {
return func(a *Attacker) { a.workers = n }
}
// MaxWorkers returns a functional option which sets the maximum number of workers
// an Attacker can use to hit its targets.
func MaxWorkers(n uint64) func(*Attacker) {
return func(a *Attacker) { a.maxWorkers = n }
}
// Connections returns a functional option which sets the number of maximum idle
// open connections per target host.
func Connections(n int) func(*Attacker) {
return func(a *Attacker) {
tr := a.client.Transport.(*http.Transport)
tr.MaxIdleConnsPerHost = n
}
}
// Redirects returns a functional option which sets the maximum
// number of redirects an Attacker will follow.
func Redirects(n int) func(*Attacker) {
return func(a *Attacker) {
a.redirects = n
a.client.CheckRedirect = func(_ *http.Request, via []*http.Request) error {
switch {
case n == NoFollow:
return http.ErrUseLastResponse
case n < len(via):
return fmt.Errorf("stopped after %d redirects", n)
default:
return nil
}
}
}
}
// Proxy returns a functional option which sets the `Proxy` field on
// the http.Client's Transport
func Proxy(proxy func(*http.Request) (*url.URL, error)) func(*Attacker) {
return func(a *Attacker) {
tr := a.client.Transport.(*http.Transport)
tr.Proxy = proxy
}
}
// Timeout returns a functional option which sets the maximum amount of time
// an Attacker will wait for a request to be responded to and completely read.
func Timeout(d time.Duration) func(*Attacker) {
return func(a *Attacker) {
a.client.Timeout = d
}
}
// LocalAddr returns a functional option which sets the local address
// an Attacker will use with its requests.
func LocalAddr(addr net.IPAddr) func(*Attacker) {
return func(a *Attacker) {
tr := a.client.Transport.(*http.Transport)
a.dialer.LocalAddr = &net.TCPAddr{IP: addr.IP, Zone: addr.Zone}
tr.Dial = a.dialer.Dial
}
}
// KeepAlive returns a functional option which toggles KeepAlive
// connections on the dialer and transport.
func KeepAlive(keepalive bool) func(*Attacker) {
return func(a *Attacker) {
tr := a.client.Transport.(*http.Transport)
tr.DisableKeepAlives = !keepalive
if !keepalive {
a.dialer.KeepAlive = 0
tr.Dial = a.dialer.Dial
}
}
}
// TLSConfig returns a functional option which sets the *tls.Config for an
// Attacker to use with its requests.
func TLSConfig(c *tls.Config) func(*Attacker) {
return func(a *Attacker) {
tr := a.client.Transport.(*http.Transport)
tr.TLSClientConfig = c
}
}
// HTTP2 returns a functional option which enables or disables HTTP/2 support
// on requests performed by an Attacker.
func HTTP2(enabled bool) func(*Attacker) {
return func(a *Attacker) {
if tr := a.client.Transport.(*http.Transport); enabled {
http2.ConfigureTransport(tr)
} else {
tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
}
}
// H2C returns a functional option which enables H2C support on requests
// performed by an Attacker.
func H2C(enabled bool) func(*Attacker) {
return func(a *Attacker) {
if tr := a.client.Transport.(*http.Transport); enabled {
a.client.Transport = &http2.Transport{
AllowHTTP: true,
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
return tr.Dial(network, addr)
},
}
}
}
}
// MaxBody returns a functional option which limits the max number of bytes
// read from response bodies. Set to -1 to disable any limits.
func MaxBody(n int64) func(*Attacker) {
return func(a *Attacker) { a.maxBody = n }
}
// UnixSocket changes the dialer for the Attacker to use the specified unix socket file.
func UnixSocket(socket string) func(*Attacker) {
return func(a *Attacker) {
if tr, ok := a.client.Transport.(*http.Transport); socket != "" && ok {
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", socket)
}
}
}
}
// Client returns a functional option that allows you to bring your own http.Client.
func Client(c *http.Client) func(*Attacker) {
return func(a *Attacker) { a.client = *c }
}
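// Editor's note: a minimal sketch (not upstream vegeta code) of how the
// functional options above compose; the option values are illustrative only.
//
//	a := NewAttacker(
//		Workers(4),              // start with 4 worker goroutines
//		MaxWorkers(64),          // never grow beyond 64 workers
//		Timeout(10*time.Second), // per-request timeout
//		Redirects(NoFollow),     // don't follow redirects, still mark success
//		MaxBody(1<<20),          // read at most 1 MiB of each response body
//	)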
// Attack reads its Targets from the passed Targeter and attacks them at
// the rate specified by the Pacer. When the duration is zero the attack
// runs until Stop is called. Results are sent to the returned channel as soon
// as they arrive and will have their Attack field set to the given name.
func (a *Attacker) Attack(tr Targeter, p Pacer, du time.Duration, name string) <-chan *Result {
var wg sync.WaitGroup
workers := a.workers
if workers > a.maxWorkers {
workers = a.maxWorkers
}
results := make(chan *Result)
ticks := make(chan struct{})
for i := uint64(0); i < workers; i++ {
wg.Add(1)
go a.attack(tr, name, &wg, ticks, results)
}
go func() {
defer close(results)
defer wg.Wait()
defer close(ticks)
began, count := time.Now(), uint64(0)
for {
elapsed := time.Since(began)
if du > 0 && elapsed > du {
return
}
wait, stop := p.Pace(elapsed, count)
if stop {
return
}
time.Sleep(wait)
if workers < a.maxWorkers {
select {
case ticks <- struct{}{}:
count++
continue
case <-a.stopch:
return
default:
// all workers are blocked. start one more and try again
workers++
wg.Add(1)
go a.attack(tr, name, &wg, ticks, results)
}
}
select {
case ticks <- struct{}{}:
count++
case <-a.stopch:
return
}
}
}()
return results
}
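// Editor's sketch (illustrative, not upstream code): draining the results
// channel into Metrics. Here a is an *Attacker from NewAttacker and the
// target URL is a placeholder.
//
//	tr := NewStaticTargeter(Target{Method: "GET", URL: "http://example.com/"})
//	pacer := ConstantPacer{Freq: 50, Per: time.Second} // 50 hits/s
//	var m Metrics
//	for res := range a.Attack(tr, pacer, 30*time.Second, "smoke") {
//		m.Add(res)
//	}
//	m.Close()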
// Stop stops the current attack.
func (a *Attacker) Stop() {
select {
case <-a.stopch:
return
default:
close(a.stopch)
}
}
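// Editor's note: Stop is idempotent and safe for concurrent use, since the
// select above closes stopch at most once. A sketch of bounding an
// open-ended attack (du == 0) from another goroutine:
//
//	go func() {
//		time.Sleep(time.Minute)
//		a.Stop() // a is the *Attacker running the attack
//	}()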
func (a *Attacker) attack(tr Targeter, name string, workers *sync.WaitGroup, ticks <-chan struct{}, results chan<- *Result) {
defer workers.Done()
for range ticks {
results <- a.hit(tr, name)
}
}
func (a *Attacker) hit(tr Targeter, name string) *Result {
var (
res = Result{Attack: name}
tgt Target
err error
)
a.seqmu.Lock()
res.Timestamp = a.began.Add(time.Since(a.began))
res.Seq = a.seq
a.seq++
a.seqmu.Unlock()
defer func() {
res.Latency = time.Since(res.Timestamp)
if err != nil {
res.Error = err.Error()
}
}()
if err = tr(&tgt); err != nil {
a.Stop()
return &res
}
req, err := tgt.Request()
if err != nil {
return &res
}
r, err := a.client.Do(req)
if err != nil {
return &res
}
defer r.Body.Close()
body := io.Reader(r.Body)
if a.maxBody >= 0 {
body = io.LimitReader(r.Body, a.maxBody)
}
if res.Body, err = ioutil.ReadAll(body); err != nil {
return &res
} else if _, err = io.Copy(ioutil.Discard, r.Body); err != nil {
return &res
}
res.BytesIn = uint64(len(res.Body))
if req.ContentLength != -1 {
res.BytesOut = uint64(req.ContentLength)
}
if res.Code = uint16(r.StatusCode); res.Code < 200 || res.Code >= 400 {
res.Error = r.Status
}
return &res
}

vendor/github.com/tsenart/vegeta/lib/histogram.go generated vendored Normal file
@@ -0,0 +1,82 @@
package vegeta
import (
"bytes"
"fmt"
"strings"
"time"
)
// Buckets represents a Histogram's latency buckets.
type Buckets []time.Duration
// Histogram is a bucketed latency Histogram.
type Histogram struct {
Buckets Buckets
Counts []uint64
Total uint64
}
// Add implements the Add method of the Report interface by finding the right
// Bucket for the given Result latency and increasing its count by one as well
// as the total count.
func (h *Histogram) Add(r *Result) {
if len(h.Counts) != len(h.Buckets) {
h.Counts = make([]uint64, len(h.Buckets))
}
var i int
for ; i < len(h.Buckets)-1; i++ {
if r.Latency >= h.Buckets[i] && r.Latency < h.Buckets[i+1] {
break
}
}
h.Total++
h.Counts[i]++
}
// MarshalJSON returns a JSON encoding of the buckets and their counts.
func (h *Histogram) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
// Custom marshalling to guarantee order.
buf.WriteString("{")
for i := range h.Buckets {
if i > 0 {
buf.WriteString(", ")
}
if _, err := fmt.Fprintf(&buf, "\"%d\": %d", h.Buckets[i], h.Counts[i]); err != nil {
return nil, err
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// Nth returns the bounds of the nth bucket as strings.
func (bs Buckets) Nth(i int) (left, right string) {
if i >= len(bs)-1 {
return bs[i].String(), "+Inf"
}
return bs[i].String(), bs[i+1].String()
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (bs *Buckets) UnmarshalText(value []byte) error {
if len(value) < 2 || value[0] != '[' || value[len(value)-1] != ']' {
return fmt.Errorf("bad buckets: %s", value)
}
for _, v := range strings.Split(string(value[1:len(value)-1]), ",") {
d, err := time.ParseDuration(strings.TrimSpace(v))
if err != nil {
return err
}
*bs = append(*bs, d)
}
if len(*bs) == 0 {
return fmt.Errorf("bad buckets: %s", value)
}
return nil
}
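// Editor's sketch: parsing a bucket list and recording a latency; the names
// are from this file, the values illustrative.
//
//	var bs Buckets
//	if err := bs.UnmarshalText([]byte("[0,10ms,50ms,200ms]")); err != nil {
//		// handle the malformed bucket spec
//	}
//	h := Histogram{Buckets: bs}
//	h.Add(&Result{Latency: 37 * time.Millisecond}) // counted in the [10ms, 50ms) bucket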

vendor/github.com/tsenart/vegeta/lib/metrics.go generated vendored Normal file
@@ -0,0 +1,188 @@
package vegeta
import (
"strconv"
"time"
"github.com/influxdata/tdigest"
)
// Metrics holds metrics computed out of a slice of Results which are used
// in some of the Reporters.
type Metrics struct {
// Latencies holds computed request latency metrics.
Latencies LatencyMetrics `json:"latencies"`
// Histogram, only if requested
Histogram *Histogram `json:"buckets,omitempty"`
// BytesIn holds computed incoming byte metrics.
BytesIn ByteMetrics `json:"bytes_in"`
// BytesOut holds computed outgoing byte metrics.
BytesOut ByteMetrics `json:"bytes_out"`
// Earliest is the earliest timestamp in a Result set.
Earliest time.Time `json:"earliest"`
// Latest is the latest timestamp in a Result set.
Latest time.Time `json:"latest"`
// End is the latest timestamp in a Result set plus its latency.
End time.Time `json:"end"`
// Duration is the duration of the attack.
Duration time.Duration `json:"duration"`
// Wait is the extra time waiting for responses from targets.
Wait time.Duration `json:"wait"`
// Requests is the total number of requests executed.
Requests uint64 `json:"requests"`
// Rate is the rate of sent requests per second.
Rate float64 `json:"rate"`
// Throughput is the rate of successful requests per second.
Throughput float64 `json:"throughput"`
// Success is the percentage of non-error responses.
Success float64 `json:"success"`
// StatusCodes is a histogram of the responses' status codes.
StatusCodes map[string]int `json:"status_codes"`
// Errors is a set of unique errors returned by the targets during the attack.
Errors []string `json:"errors"`
errors map[string]struct{}
success uint64
}
// Add implements the Add method of the Report interface by adding the given
// Result to Metrics.
func (m *Metrics) Add(r *Result) {
m.init()
m.Requests++
m.StatusCodes[strconv.Itoa(int(r.Code))]++
m.BytesOut.Total += r.BytesOut
m.BytesIn.Total += r.BytesIn
m.Latencies.Add(r.Latency)
if m.Earliest.IsZero() || m.Earliest.After(r.Timestamp) {
m.Earliest = r.Timestamp
}
if r.Timestamp.After(m.Latest) {
m.Latest = r.Timestamp
}
if end := r.End(); end.After(m.End) {
m.End = end
}
if r.Code >= 200 && r.Code < 400 {
m.success++
}
if r.Error != "" {
if _, ok := m.errors[r.Error]; !ok {
m.errors[r.Error] = struct{}{}
m.Errors = append(m.Errors, r.Error)
}
}
if m.Histogram != nil {
m.Histogram.Add(r)
}
}
// Close implements the Close method of the Report interface by computing
// derived summary metrics which don't need to be run on every Add call.
func (m *Metrics) Close() {
m.init()
m.Rate = float64(m.Requests)
m.Throughput = float64(m.success)
m.Duration = m.Latest.Sub(m.Earliest)
m.Wait = m.End.Sub(m.Latest)
if secs := m.Duration.Seconds(); secs > 0 {
m.Rate /= secs
// No need to check for zero because we know m.Duration > 0
m.Throughput /= (m.Duration + m.Wait).Seconds()
}
m.BytesIn.Mean = float64(m.BytesIn.Total) / float64(m.Requests)
m.BytesOut.Mean = float64(m.BytesOut.Total) / float64(m.Requests)
m.Success = float64(m.success) / float64(m.Requests)
m.Latencies.Mean = time.Duration(float64(m.Latencies.Total) / float64(m.Requests))
m.Latencies.P50 = m.Latencies.Quantile(0.50)
m.Latencies.P95 = m.Latencies.Quantile(0.95)
m.Latencies.P99 = m.Latencies.Quantile(0.99)
}
func (m *Metrics) init() {
if m.StatusCodes == nil {
m.StatusCodes = map[string]int{}
}
if m.errors == nil {
m.errors = map[string]struct{}{}
}
if m.Errors == nil {
m.Errors = make([]string, 0)
}
}
// LatencyMetrics holds computed request latency metrics.
type LatencyMetrics struct {
// Total is the total latency sum of all requests in an attack.
Total time.Duration `json:"total"`
// Mean is the mean request latency.
Mean time.Duration `json:"mean"`
// P50 is the 50th percentile request latency.
P50 time.Duration `json:"50th"`
// P95 is the 95th percentile request latency.
P95 time.Duration `json:"95th"`
// P99 is the 99th percentile request latency.
P99 time.Duration `json:"99th"`
// Max is the maximum observed request latency.
Max time.Duration `json:"max"`
estimator estimator
}
// Add adds the given latency to the latency metrics.
func (l *LatencyMetrics) Add(latency time.Duration) {
l.init()
if l.Total += latency; latency > l.Max {
l.Max = latency
}
l.estimator.Add(float64(latency))
}
// Quantile returns the nth quantile from the latency summary.
func (l LatencyMetrics) Quantile(nth float64) time.Duration {
l.init()
return time.Duration(l.estimator.Get(nth))
}
func (l *LatencyMetrics) init() {
if l.estimator == nil {
// This compression parameter value is the recommended value
// for normal uses as per http://javadox.com/com.tdunning/t-digest/3.0/com/tdunning/math/stats/TDigest.html
l.estimator = newTdigestEstimator(100)
}
}
// ByteMetrics holds computed byte flow metrics.
type ByteMetrics struct {
// Total is the total number of flowing bytes in an attack.
Total uint64 `json:"total"`
// Mean is the mean number of flowing bytes per hit.
Mean float64 `json:"mean"`
}
type estimator interface {
Add(sample float64)
Get(quantile float64) float64
}
type tdigestEstimator struct{ *tdigest.TDigest }
func newTdigestEstimator(compression float64) *tdigestEstimator {
return &tdigestEstimator{TDigest: tdigest.NewWithCompression(compression)}
}
func (e *tdigestEstimator) Add(s float64) { e.TDigest.Add(s, 1) }
func (e *tdigestEstimator) Get(q float64) float64 {
return e.TDigest.Quantile(q)
}
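// Editor's sketch: Metrics is an incremental Report — Add every Result, then
// Close exactly once to derive rates and percentiles. rs is a hypothetical
// slice of recorded results.
//
//	var m Metrics
//	for i := range rs {
//		m.Add(&rs[i])
//	}
//	m.Close()
//	// m.Rate, m.Success and m.Latencies.P99 are now populated.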

vendor/github.com/tsenart/vegeta/lib/pacer.go generated vendored Normal file
@@ -0,0 +1,279 @@
package vegeta
import (
"fmt"
"math"
"time"
)
// A Pacer defines the rate of hits during an Attack by
// returning the duration an Attacker should wait until
// hitting the next Target. If the second return value
// is true, the attack will terminate.
type Pacer interface {
Pace(elapsed time.Duration, hits uint64) (wait time.Duration, stop bool)
}
// A PacerFunc is a function adapter type that implements
// the Pacer interface.
type PacerFunc func(time.Duration, uint64) (time.Duration, bool)
// Pace implements the Pacer interface.
func (pf PacerFunc) Pace(elapsed time.Duration, hits uint64) (time.Duration, bool) {
return pf(elapsed, hits)
}
// A ConstantPacer defines a constant rate of hits for the target.
type ConstantPacer struct {
Freq int // Frequency (number of occurrences) per ...
Per time.Duration // Time unit, usually 1s
}
// Rate is a type alias for ConstantPacer for backwards-compatibility.
type Rate = ConstantPacer
// ConstantPacer satisfies the Pacer interface.
var _ Pacer = ConstantPacer{}
// String returns a pretty-printed description of the ConstantPacer's behaviour:
// ConstantPacer{Freq: 1, Per: time.Second} => Constant{1 hits/1s}
func (cp ConstantPacer) String() string {
return fmt.Sprintf("Constant{%d hits/%s}", cp.Freq, cp.Per)
}
// Pace determines the length of time to sleep until the next hit is sent.
func (cp ConstantPacer) Pace(elapsed time.Duration, hits uint64) (time.Duration, bool) {
switch {
case cp.Per == 0 || cp.Freq == 0:
return 0, false // Zero value = infinite rate
case cp.Per < 0 || cp.Freq < 0:
return 0, true
}
expectedHits := uint64(cp.Freq) * uint64(elapsed/cp.Per)
if hits < expectedHits {
// Running behind, send next hit immediately.
return 0, false
}
interval := uint64(cp.Per.Nanoseconds() / int64(cp.Freq))
if math.MaxInt64/interval < hits {
// We would overflow delta if we continued, so stop the attack.
return 0, true
}
delta := time.Duration((hits + 1) * interval)
// Zero or negative durations cause time.Sleep to return immediately.
return delta - elapsed, false
}
// hitsPerNs returns the attack rate this ConstantPacer represents, in
// fractional hits per nanosecond.
func (cp ConstantPacer) hitsPerNs() float64 {
return float64(cp.Freq) / float64(cp.Per)
}
const (
// MeanUp is a SinePacer Offset that causes the attack to start
// at the Mean attack rate and increase towards the peak.
MeanUp float64 = 0
// Peak is a SinePacer Offset that causes the attack to start
// at the peak (maximum) attack rate and decrease towards the Mean.
Peak = math.Pi / 2
// MeanDown is a SinePacer Offset that causes the attack to start
// at the Mean attack rate and decrease towards the trough.
MeanDown = math.Pi
// Trough is a SinePacer Offset that causes the attack to start
// at the trough (minimum) attack rate and increase towards the Mean.
Trough = 3 * math.Pi / 2
)
// SinePacer is a Pacer that describes attack request rates with the equation:
// R = M + A sin(O+(2𝛑/P)t)
// Where:
// R = Instantaneous attack rate at elapsed time t, hits per nanosecond
// M = Mean attack rate over period P, sp.Mean, hits per nanosecond
// A = Amplitude of sine wave, sp.Amp, hits per nanosecond
// O = Offset of sine wave, sp.StartAt, radians
// P = Period of sine wave, sp.Period, nanoseconds
// t = Elapsed time since attack start, nanoseconds
//
// Many thanks to http://ascii.co.uk/art/sine and "sps" for the ascii here :-)
//
// Mean -| ,-'''-.
// +Amp | ,-' | `-.
// | ,' | `. O=𝛑
// | ,' O=𝛑/2 `. MeanDown
// | / Peak \ /
// |/ \ /
// Mean -+-------------------------\--------------------------> t
// |\ \ /
// | \ \ O=3𝛑/2 /
// | O=0 `. Trough ,'
// | MeanUp `. | ,'
// Mean | `-. | ,-'
// -Amp -| `-,,,-'
// |<-------------------- Period --------------------->|
//
// This equation is integrated with respect to time to derive the expected
// number of hits served at time t after the attack began:
// H = Mt - (AP/2𝛑)cos(O+(2𝛑/P)t) + (AP/2𝛑)cos(O)
// Where:
// H = Total number of hits triggered during t
type SinePacer struct {
// The period of the sine wave, e.g. 20*time.Minute
// MUST BE > 0
Period time.Duration
// The mid-point of the sine wave in freq-per-Duration,
// MUST BE > 0
Mean Rate
// The amplitude of the sine wave in freq-per-Duration,
// MUST NOT BE EQUAL TO OR LARGER THAN MEAN
Amp Rate
// The offset, in radians, for the sine wave at t=0.
StartAt float64
}
// SinePacer satisfies the Pacer interface.
var _ Pacer = SinePacer{}
// String returns a pretty-printed description of the SinePacer's behaviour:
// SinePacer{
// Period: time.Hour,
// Mean: Rate{100, time.Second},
// Amp: Rate{50, time.Second},
// StartAt: MeanDown,
// } =>
// Sine{Constant{100 hits/1s} ± Constant{50 hits/1s} / 1h, offset 1𝛑}
func (sp SinePacer) String() string {
return fmt.Sprintf("Sine{%s ± %s / %s, offset %g𝛑}", sp.Mean, sp.Amp, sp.Period, sp.StartAt/math.Pi)
}
// invalid tests the constraints documented in the SinePacer struct definition.
func (sp SinePacer) invalid() bool {
return sp.Period <= 0 || sp.Mean.hitsPerNs() <= 0 || sp.Amp.hitsPerNs() >= sp.Mean.hitsPerNs()
}
// Pace determines the length of time to sleep until the next hit is sent.
func (sp SinePacer) Pace(elapsedTime time.Duration, elapsedHits uint64) (time.Duration, bool) {
if sp.invalid() {
// If the SinePacer configuration is invalid, stop the attack.
return 0, true
}
expectedHits := sp.hits(elapsedTime)
if elapsedHits < uint64(expectedHits) {
// Running behind, send next hit immediately.
return 0, false
}
// Re-arranging our hits equation to provide a duration given the number of
// requests sent is non-trivial, so we must solve for the duration numerically.
// math.Round() added here because we have to coerce to int64 nanoseconds
// at some point and it corrects a bunch of off-by-one problems.
nsPerHit := math.Round(1 / sp.hitsPerNs(elapsedTime))
hitsToWait := float64(elapsedHits+1) - expectedHits
nextHitIn := time.Duration(nsPerHit * hitsToWait)
// If we can't converge to an error of <1e-3 within 5 iterations, bail.
// This rarely even loops for any large Period if hitsToWait is small.
for i := 0; i < 5; i++ {
hitsAtGuess := sp.hits(elapsedTime + nextHitIn)
err := float64(elapsedHits+1) - hitsAtGuess
if math.Abs(err) < 1e-3 {
return nextHitIn, false
}
nextHitIn = time.Duration(float64(nextHitIn) / (hitsAtGuess - float64(elapsedHits)))
}
return nextHitIn, false
}
// ampHits returns AP/2𝛑, which is the number of hits added or subtracted
// from the Mean due to the Amplitude over a quarter of the Period,
// i.e. from 0 → 𝛑/2 radians
func (sp SinePacer) ampHits() float64 {
return (sp.Amp.hitsPerNs() * float64(sp.Period)) / (2 * math.Pi)
}
// radians converts the elapsed attack time to a radian value.
// The elapsed time t is divided by the wave period, multiplied by 2𝛑 to
// convert to radians, and offset by StartAt radians.
func (sp SinePacer) radians(t time.Duration) float64 {
return sp.StartAt + float64(t)*2*math.Pi/float64(sp.Period)
}
// hitsPerNs calculates the instantaneous rate of attack at
// t nanoseconds after the attack began.
// R = M + A sin(O+(2𝛑/P)t)
func (sp SinePacer) hitsPerNs(t time.Duration) float64 {
return sp.Mean.hitsPerNs() + sp.Amp.hitsPerNs()*math.Sin(sp.radians(t))
}
// hits returns the number of hits that have been sent during an attack
// lasting t nanoseconds. It returns a float so we can tell exactly how
// much we've missed our target by when solving numerically in Pace.
// H = Mt - (AP/2𝛑)cos(O+(2𝛑/P)t) + (AP/2𝛑)cos(O)
// This re-arranges to:
// H = Mt + (AP/2𝛑)(cos(O) - cos(O+(2𝛑/P)t))
func (sp SinePacer) hits(t time.Duration) float64 {
if t <= 0 || sp.invalid() {
return 0
}
return sp.Mean.hitsPerNs()*float64(t) + sp.ampHits()*(math.Cos(sp.StartAt)-math.Cos(sp.radians(t)))
}
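// Editor's sketch: a SinePacer oscillating between 50 and 150 hits/s over a
// 20-minute period, starting at the maximum rate. The values are
// illustrative.
//
//	sp := SinePacer{
//		Period:  20 * time.Minute,
//		Mean:    Rate{Freq: 100, Per: time.Second},
//		Amp:     Rate{Freq: 50, Per: time.Second},
//		StartAt: Peak,
//	}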
// LinearPacer paces an attack by starting at a given request rate
// and increasing linearly with the given slope.
type LinearPacer struct {
StartAt Rate
Slope float64
}
// Pace determines the length of time to sleep until the next hit is sent.
func (p LinearPacer) Pace(elapsed time.Duration, hits uint64) (time.Duration, bool) {
switch {
case p.StartAt.Per == 0 || p.StartAt.Freq == 0:
return 0, false // Zero value = infinite rate
case p.StartAt.Per < 0 || p.StartAt.Freq < 0:
return 0, true
}
expectedHits := p.hits(elapsed)
if hits == 0 || hits < uint64(expectedHits) {
// Running behind, send next hit immediately.
return 0, false
}
rate := p.rate(elapsed)
interval := math.Round(1e9 / rate)
if n := uint64(interval); n != 0 && math.MaxInt64/n < hits {
// We would overflow wait if we continued, so stop the attack.
return 0, true
}
delta := float64(hits+1) - expectedHits
wait := time.Duration(interval * delta)
return wait, false
}
// hits returns the number of hits that have been sent during an attack
// lasting t nanoseconds. It returns a float so we can tell exactly how
// much we've missed our target by when solving numerically in Pace.
func (p LinearPacer) hits(t time.Duration) float64 {
if t < 0 {
return 0
}
a := p.Slope
b := p.StartAt.hitsPerNs() * 1e9
x := t.Seconds()
return (a*math.Pow(x, 2))/2 + b*x
}
// rate calculates the instantaneous rate of attack at
// t nanoseconds after the attack began.
func (p LinearPacer) rate(t time.Duration) float64 {
a := p.Slope
x := t.Seconds()
b := p.StartAt.hitsPerNs() * 1e9
return a*x + b
}
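// Editor's sketch: every pacer above shares the same calling convention; the
// numbers are illustrative.
//
//	var p Pacer = LinearPacer{StartAt: Rate{Freq: 10, Per: time.Second}, Slope: 1}
//	wait, stop := p.Pace(5*time.Second, 75) // 5s elapsed, 75 hits sent so far
//	if !stop {
//		time.Sleep(wait) // then send the next hit
//	}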

vendor/github.com/tsenart/vegeta/lib/reporters.go generated vendored Normal file
@@ -0,0 +1,252 @@
package vegeta
import (
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"text/tabwriter"
"time"
)
// A Report represents the state a Reporter uses to write out its reports.
type Report interface {
// Add adds a given *Result to a Report.
Add(*Result)
}
// Closer wraps the optional Report Close method.
type Closer interface {
// Close permanently closes a Report, running any necessary bookkeeping.
Close()
}
// A Reporter function writes out reports to the given io.Writer or returns an
// error in case of failure.
type Reporter func(io.Writer) error
// Report is a convenience method wrapping the Reporter function type.
func (rep Reporter) Report(w io.Writer) error { return rep(w) }
// NewHistogramReporter returns a Reporter that writes out a Histogram as
// aligned, formatted text.
func NewHistogramReporter(h *Histogram) Reporter {
return func(w io.Writer) (err error) {
tw := tabwriter.NewWriter(w, 0, 8, 2, ' ', tabwriter.StripEscape)
if _, err = fmt.Fprintf(tw, "Bucket\t\t#\t%%\tHistogram\n"); err != nil {
return err
}
for i, count := range h.Counts {
ratio := float64(count) / float64(h.Total)
lo, hi := h.Buckets.Nth(i)
pad := strings.Repeat("#", int(ratio*75))
_, err = fmt.Fprintf(tw, "[%s,\t%s]\t%d\t%.2f%%\t%s\n", lo, hi, count, ratio*100, pad)
if err != nil {
return err
}
}
return tw.Flush()
}
}
// NewTextReporter returns a Reporter that writes out Metrics as aligned,
// formatted text.
func NewTextReporter(m *Metrics) Reporter {
const fmtstr = "Requests\t[total, rate, throughput]\t%d, %.2f, %.2f\n" +
"Duration\t[total, attack, wait]\t%s, %s, %s\n" +
"Latencies\t[mean, 50, 95, 99, max]\t%s, %s, %s, %s, %s\n" +
"Bytes In\t[total, mean]\t%d, %.2f\n" +
"Bytes Out\t[total, mean]\t%d, %.2f\n" +
"Success\t[ratio]\t%.2f%%\n" +
"Status Codes\t[code:count]\t"
return func(w io.Writer) (err error) {
tw := tabwriter.NewWriter(w, 0, 8, 2, ' ', tabwriter.StripEscape)
if _, err = fmt.Fprintf(tw, fmtstr,
m.Requests, m.Rate, m.Throughput,
m.Duration+m.Wait, m.Duration, m.Wait,
m.Latencies.Mean, m.Latencies.P50, m.Latencies.P95, m.Latencies.P99, m.Latencies.Max,
m.BytesIn.Total, m.BytesIn.Mean,
m.BytesOut.Total, m.BytesOut.Mean,
m.Success*100,
); err != nil {
return err
}
codes := make([]string, 0, len(m.StatusCodes))
for code := range m.StatusCodes {
codes = append(codes, code)
}
sort.Strings(codes)
for _, code := range codes {
count := m.StatusCodes[code]
if _, err = fmt.Fprintf(tw, "%s:%d ", code, count); err != nil {
return err
}
}
if _, err = fmt.Fprintln(tw, "\nError Set:"); err != nil {
return err
}
for _, e := range m.Errors {
if _, err = fmt.Fprintln(tw, e); err != nil {
return err
}
}
return tw.Flush()
}
}
// NewJSONReporter returns a Reporter that writes out Metrics as JSON.
func NewJSONReporter(m *Metrics) Reporter {
return func(w io.Writer) error {
return json.NewEncoder(w).Encode(m)
}
}
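// Editor's sketch: Reporters write a closed Metrics value to any io.Writer;
// writing to os.Stdout is illustrative (the os import is assumed on the
// caller's side).
//
//	m.Close()
//	_ = NewTextReporter(&m).Report(os.Stdout)
//	_ = NewJSONReporter(&m).Report(os.Stdout)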
var logarithmic = []float64{
0.00,
0.100,
0.200,
0.300,
0.400,
0.500,
0.550,
0.600,
0.650,
0.700,
0.750,
0.775,
0.800,
0.825,
0.850,
0.875,
0.8875,
0.900,
0.9125,
0.925,
0.9375,
0.94375,
0.950,
0.95625,
0.9625,
0.96875,
0.971875,
0.975,
0.978125,
0.98125,
0.984375,
0.985938,
0.9875,
0.989062,
0.990625,
0.992188,
0.992969,
0.99375,
0.994531,
0.995313,
0.996094,
0.996484,
0.996875,
0.997266,
0.997656,
0.998047,
0.998242,
0.998437,
0.998633,
0.998828,
0.999023,
0.999121,
0.999219,
0.999316,
0.999414,
0.999512,
0.999561,
0.999609,
0.999658,
0.999707,
0.999756,
0.99978,
0.999805,
0.999829,
0.999854,
0.999878,
0.99989,
0.999902,
0.999915,
0.999927,
0.999939,
0.999945,
0.999951,
0.999957,
0.999963,
0.999969,
0.999973,
0.999976,
0.999979,
0.999982,
0.999985,
0.999986,
0.999988,
0.999989,
0.999991,
0.999992,
0.999993,
0.999994,
0.999995,
0.999996,
0.999997,
0.999998,
0.999999,
1.0,
}
// NewHDRHistogramPlotReporter returns a Reporter that writes out latency metrics
// in a format plottable by http://hdrhistogram.github.io/HdrHistogram/plotFiles.html.
func NewHDRHistogramPlotReporter(m *Metrics) Reporter {
return func(w io.Writer) error {
tw := tabwriter.NewWriter(w, 0, 8, 2, ' ', tabwriter.StripEscape)
_, err := fmt.Fprintf(tw, "Value(ms)\tPercentile\tTotalCount\t1/(1-Percentile)\n")
if err != nil {
return err
}
total := float64(m.Requests)
for _, q := range logarithmic {
value := milliseconds(m.Latencies.Quantile(q))
oneBy := oneByQuantile(q)
count := int64((q * total) + 0.5) // Count at quantile
_, err = fmt.Fprintf(tw, "%f\t%f\t%d\t%f\n", value, q, count, oneBy)
if err != nil {
return err
}
}
return tw.Flush()
}
}
// milliseconds converts the given duration to a number of
// fractional milliseconds. Splitting the integer and fraction
// ourselves guarantees that converting the returned float64 to an
// integer rounds the same way that a pure integer conversion would have,
// even in cases where, say, float64(d.Nanoseconds())/1e9 would have rounded
// differently.
func milliseconds(d time.Duration) float64 {
msec, nsec := d/time.Millisecond, d%time.Millisecond
return float64(msec) + float64(nsec)/1e6
}
func oneByQuantile(q float64) float64 {
if q < 1.0 {
return 1 / (1 - q)
}
return float64(10000000)
}

vendor/github.com/tsenart/vegeta/lib/results.go generated vendored Normal file
@@ -0,0 +1,241 @@
package vegeta
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/csv"
"encoding/gob"
"io"
"sort"
"strconv"
"time"
"github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
func init() {
gob.Register(&Result{})
}
// Result contains the results of a single Target hit.
type Result struct {
Attack string `json:"attack"`
Seq uint64 `json:"seq"`
Code uint16 `json:"code"`
Timestamp time.Time `json:"timestamp"`
Latency time.Duration `json:"latency"`
BytesOut uint64 `json:"bytes_out"`
BytesIn uint64 `json:"bytes_in"`
Error string `json:"error"`
Body []byte `json:"body"`
}
// End returns the time at which a Result ended.
func (r *Result) End() time.Time { return r.Timestamp.Add(r.Latency) }
// Equal returns true if the given Result is equal to the receiver.
func (r Result) Equal(other Result) bool {
return r.Attack == other.Attack &&
r.Seq == other.Seq &&
r.Code == other.Code &&
r.Timestamp.Equal(other.Timestamp) &&
r.Latency == other.Latency &&
r.BytesIn == other.BytesIn &&
r.BytesOut == other.BytesOut &&
r.Error == other.Error &&
bytes.Equal(r.Body, other.Body)
}
// Results is a slice of Result type elements.
type Results []Result
// Add implements the Add method of the Report interface by appending the given
// Result to the slice.
func (rs *Results) Add(r *Result) { *rs = append(*rs, *r) }
// Close implements the Close method of the Report interface by sorting the
// Results.
func (rs *Results) Close() { sort.Sort(rs) }
// The following methods implement sort.Interface
func (rs Results) Len() int { return len(rs) }
func (rs Results) Less(i, j int) bool { return rs[i].Timestamp.Before(rs[j].Timestamp) }
func (rs Results) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
// A Decoder decodes a Result and returns an error in case of failure.
type Decoder func(*Result) error
// A DecoderFactory constructs a new Decoder from a given io.Reader.
type DecoderFactory func(io.Reader) Decoder
// DecoderFor automatically detects the encoding of the first few bytes in
// the given io.Reader and then returns the corresponding Decoder, or nil
// if no supported encoding is detected.
func DecoderFor(r io.Reader) Decoder {
var buf bytes.Buffer
for _, dec := range []DecoderFactory{
NewDecoder,
NewJSONDecoder,
NewCSVDecoder,
} {
rd := io.MultiReader(bytes.NewReader(buf.Bytes()), io.TeeReader(r, &buf))
if err := dec(rd).Decode(&Result{}); err == nil {
return dec(io.MultiReader(&buf, r))
}
}
return nil
}
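// Editor's sketch: DecoderFor sniffs the stream's encoding and hands back a
// ready Decoder; f is a hypothetical io.Reader over recorded results.
//
//	if dec := DecoderFor(f); dec != nil {
//		var r Result
//		for dec.Decode(&r) == nil {
//			// consume r
//		}
//	}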
// NewRoundRobinDecoder returns a new Decoder that round robins across the
// given Decoders on every invocation or decoding error.
func NewRoundRobinDecoder(dec ...Decoder) Decoder {
// Optimization for single Decoder case.
if len(dec) == 1 {
return dec[0]
}
var seq uint64
return func(r *Result) (err error) {
for range dec {
robin := seq % uint64(len(dec))
seq++
if err = dec[robin].Decode(r); err != nil {
continue
}
return nil
}
return err
}
}
// NewDecoder returns a new gob Decoder for the given io.Reader.
func NewDecoder(rd io.Reader) Decoder {
dec := gob.NewDecoder(rd)
return func(r *Result) error { return dec.Decode(r) }
}
// Decode is an adapter method calling the Decoder function itself with the
// given parameters.
func (dec Decoder) Decode(r *Result) error { return dec(r) }
// An Encoder encodes a Result and returns an error in case of failure.
type Encoder func(*Result) error
// NewEncoder returns a new Result encoder closure for the given io.Writer
func NewEncoder(r io.Writer) Encoder {
enc := gob.NewEncoder(r)
return func(r *Result) error { return enc.Encode(r) }
}
// Encode is an adapter method calling the Encoder function itself with the
// given parameters.
func (enc Encoder) Encode(r *Result) error { return enc(r) }
// NewCSVEncoder returns an Encoder that dumps the given *Result as a CSV
// record. The columns are: UNIX timestamp in ns since epoch,
// HTTP status code, request latency in ns, bytes out, bytes in,
// the error, the base64 encoded response body, the attack name, and the
// sequence number.
func NewCSVEncoder(w io.Writer) Encoder {
enc := csv.NewWriter(w)
return func(r *Result) error {
err := enc.Write([]string{
strconv.FormatInt(r.Timestamp.UnixNano(), 10),
strconv.FormatUint(uint64(r.Code), 10),
strconv.FormatInt(r.Latency.Nanoseconds(), 10),
strconv.FormatUint(r.BytesOut, 10),
strconv.FormatUint(r.BytesIn, 10),
r.Error,
base64.StdEncoding.EncodeToString(r.Body),
r.Attack,
strconv.FormatUint(r.Seq, 10),
})
if err != nil {
return err
}
enc.Flush()
return enc.Error()
}
}
// NewCSVDecoder returns a Decoder that decodes CSV encoded Results.
func NewCSVDecoder(rd io.Reader) Decoder {
dec := csv.NewReader(rd)
dec.FieldsPerRecord = 9
dec.TrimLeadingSpace = true
return func(r *Result) error {
rec, err := dec.Read()
if err != nil {
return err
}
ts, err := strconv.ParseInt(rec[0], 10, 64)
if err != nil {
return err
}
r.Timestamp = time.Unix(0, ts)
code, err := strconv.ParseUint(rec[1], 10, 16)
if err != nil {
return err
}
r.Code = uint16(code)
latency, err := strconv.ParseInt(rec[2], 10, 64)
if err != nil {
return err
}
r.Latency = time.Duration(latency)
if r.BytesOut, err = strconv.ParseUint(rec[3], 10, 64); err != nil {
return err
}
if r.BytesIn, err = strconv.ParseUint(rec[4], 10, 64); err != nil {
return err
}
r.Error = rec[5]
if r.Body, err = base64.StdEncoding.DecodeString(rec[6]); err != nil {
return err
}
r.Attack = rec[7]
if r.Seq, err = strconv.ParseUint(rec[8], 10, 64); err != nil {
return err
}
return nil
}
}
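// Editor's sketch: Encoders and Decoders are symmetric; a gob round trip
// through an in-memory buffer (bytes is already imported by this file).
//
//	var buf bytes.Buffer
//	_ = NewEncoder(&buf).Encode(&Result{Code: 200})
//	var out Result
//	_ = NewDecoder(&buf).Decode(&out) // out.Code == 200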
// NewJSONEncoder returns an Encoder that dumps the given *Result as a JSON
// object.
func NewJSONEncoder(w io.Writer) Encoder {
var jw jwriter.Writer
return func(r *Result) error {
(*jsonResult)(r).encode(&jw)
if jw.Error != nil {
return jw.Error
}
jw.RawByte('\n')
_, err := jw.DumpTo(w)
return err
}
}
// NewJSONDecoder returns a Decoder that decodes JSON encoded Results.
func NewJSONDecoder(r io.Reader) Decoder {
rd := bufio.NewReader(r)
return func(r *Result) (err error) {
var jl jlexer.Lexer
if jl.Data, err = rd.ReadSlice('\n'); err != nil {
return err
}
(*jsonResult)(r).decode(&jl)
return jl.Error()
}
}

@@ -0,0 +1,165 @@
// This file has been modified from the original generated code to make it work with
// the defined type jsonResult so that the methods aren't exposed on Result.
package vegeta
import (
"time"
"github.com/mailru/easyjson/jlexer"
"github.com/mailru/easyjson/jwriter"
)
type jsonResult Result
func (r *jsonResult) decode(in *jlexer.Lexer) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeString()
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "attack":
r.Attack = string(in.String())
case "seq":
r.Seq = uint64(in.Uint64())
case "code":
r.Code = uint16(in.Uint16())
case "timestamp":
if data := in.Raw(); in.Ok() {
in.AddError((r.Timestamp).UnmarshalJSON(data))
}
case "latency":
r.Latency = time.Duration(in.Int64())
case "bytes_out":
r.BytesOut = uint64(in.Uint64())
case "bytes_in":
r.BytesIn = uint64(in.Uint64())
case "error":
r.Error = string(in.String())
case "body":
if in.IsNull() {
in.Skip()
r.Body = nil
} else {
r.Body = in.Bytes()
}
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func (r jsonResult) encode(out *jwriter.Writer) {
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"attack\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(r.Attack))
}
{
const prefix string = ",\"seq\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Uint64(uint64(r.Seq))
}
{
const prefix string = ",\"code\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Uint16(uint16(r.Code))
}
{
const prefix string = ",\"timestamp\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Raw((r.Timestamp).MarshalJSON())
}
{
const prefix string = ",\"latency\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(r.Latency))
}
{
const prefix string = ",\"bytes_out\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Uint64(uint64(r.BytesOut))
}
{
const prefix string = ",\"bytes_in\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Uint64(uint64(r.BytesIn))
}
{
const prefix string = ",\"error\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(r.Error))
}
{
const prefix string = ",\"body\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Base64Bytes(r.Body)
}
out.RawByte('}')
}

vendor/github.com/tsenart/vegeta/lib/targets.go generated vendored Normal file
@@ -0,0 +1,371 @@
package vegeta
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"sync/atomic"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// Target is an HTTP request blueprint.
//
//go:generate go run ../internal/cmd/jsonschema/main.go -type=Target -output=target.schema.json
type Target struct {
Method string `json:"method"`
URL string `json:"url"`
Body []byte `json:"body,omitempty"`
Header http.Header `json:"header,omitempty"`
}
// Request creates an *http.Request out of Target and returns it along with an
// error in case of failure.
func (t *Target) Request() (*http.Request, error) {
req, err := http.NewRequest(t.Method, t.URL, bytes.NewReader(t.Body))
if err != nil {
return nil, err
}
for k, vs := range t.Header {
req.Header[k] = make([]string, len(vs))
copy(req.Header[k], vs)
}
if host := req.Header.Get("Host"); host != "" {
req.Host = host
}
return req, nil
}
// Equal returns true if the target is equal to the other given target.
func (t *Target) Equal(other *Target) bool {
switch {
case t == other:
return true
case t == nil || other == nil:
return false
default:
equal := t.Method == other.Method &&
t.URL == other.URL &&
bytes.Equal(t.Body, other.Body) &&
len(t.Header) == len(other.Header)
if !equal {
return false
}
for k := range t.Header {
left, right := t.Header[k], other.Header[k]
if len(left) != len(right) {
return false
}
for i := range left {
if left[i] != right[i] {
return false
}
}
}
return true
}
}
var (
// ErrNoTargets is returned when not enough Targets are available.
ErrNoTargets = errors.New("no targets to attack")
// ErrNilTarget is returned when the passed Target pointer is nil.
ErrNilTarget = errors.New("nil target")
// ErrNoMethod is returned by JSONTargeter when a parsed Target has
// no method.
ErrNoMethod = errors.New("target: required method is missing")
// ErrNoURL is returned by JSONTargeter when a parsed Target has no
// URL.
ErrNoURL = errors.New("target: required url is missing")
// TargetFormats contains the canonical list of the valid target
// format identifiers.
TargetFormats = []string{HTTPTargetFormat, JSONTargetFormat}
)
const (
// HTTPTargetFormat is the human readable identifier for the HTTP target format.
HTTPTargetFormat = "http"
// JSONTargetFormat is the human readable identifier for the JSON target format.
JSONTargetFormat = "json"
)
// A Targeter decodes a Target or returns an error in case of failure.
// Implementations must be safe for concurrent use.
type Targeter func(*Target) error
// Decode is a convenience method that calls the underlying Targeter function.
func (tr Targeter) Decode(t *Target) error {
return tr(t)
}
// NewJSONTargeter returns a new targeter that decodes one Target from the
// given io.Reader on every invocation. Each target is one JSON object on its own line.
//
// The method and url fields are required. If present, the body field must be base64 encoded.
// The generated [JSON Schema](lib/target.schema.json) defines the format in detail.
//
// {"method":"POST", "url":"https://goku/1", "header":{"Content-Type":["text/plain"], "body": "Rk9P"}
// {"method":"GET", "url":"https://goku/2"}
//
// body will be set as the Target's body if no body is provided in a target definition.
// header will be merged with each Target's headers.
//
func NewJSONTargeter(src io.Reader, body []byte, header http.Header) Targeter {
type reader struct {
*bufio.Reader
sync.Mutex
}
rd := reader{Reader: bufio.NewReader(src)}
return func(tgt *Target) (err error) {
if tgt == nil {
return ErrNilTarget
}
var jl jlexer.Lexer
rd.Lock()
for len(jl.Data) == 0 {
if jl.Data, err = rd.ReadBytes('\n'); err != nil {
break
}
jl.Data = bytes.TrimSpace(jl.Data) // Skip empty lines
}
rd.Unlock()
if err != nil {
if err == io.EOF {
err = ErrNoTargets
}
return err
}
var t jsonTarget
t.decode(&jl)
if err = jl.Error(); err != nil {
return err
} else if t.Method == "" {
return ErrNoMethod
} else if t.URL == "" {
return ErrNoURL
}
tgt.Method = t.Method
tgt.URL = t.URL
if tgt.Body = body; len(t.Body) > 0 {
tgt.Body = t.Body
}
if tgt.Header == nil {
tgt.Header = http.Header{}
}
for k, vs := range header {
tgt.Header[k] = append(tgt.Header[k], vs...)
}
for k, vs := range t.Header {
tgt.Header[k] = append(tgt.Header[k], vs...)
}
return nil
}
}
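// Editor's sketch: feeding NewJSONTargeter from an in-memory source; strings
// is already imported by this file and the URL is a placeholder.
//
//	src := strings.NewReader(`{"method":"GET","url":"http://example.com"}` + "\n")
//	tr := NewJSONTargeter(src, nil, nil)
//	var t Target
//	err := tr(&t) // t.Method == "GET", t.URL == "http://example.com"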
// A TargetEncoder encodes a Target in a format that can be read by a Targeter.
type TargetEncoder func(*Target) error
// Encode is a convenience method that calls the underlying TargetEncoder function.
func (enc TargetEncoder) Encode(t *Target) error {
return enc(t)
}
// NewJSONTargetEncoder returns a TargetEncoder that encodes Targets in the JSON format.
func NewJSONTargetEncoder(w io.Writer) TargetEncoder {
var jw jwriter.Writer
return func(t *Target) error {
(*jsonTarget)(t).encode(&jw)
if jw.Error != nil {
return jw.Error
}
jw.RawByte('\n')
_, err := jw.DumpTo(w)
return err
}
}
// NewStaticTargeter returns a Targeter which round-robins over the passed
// Targets.
func NewStaticTargeter(tgts ...Target) Targeter {
i := int64(-1)
return func(tgt *Target) error {
if tgt == nil {
return ErrNilTarget
}
*tgt = tgts[atomic.AddInt64(&i, 1)%int64(len(tgts))]
return nil
}
}
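// Editor's sketch: a static targeter round-robins over its targets; the URLs
// are placeholders.
//
//	tr := NewStaticTargeter(
//		Target{Method: "GET", URL: "http://example.com/a"},
//		Target{Method: "GET", URL: "http://example.com/b"},
//	)
//	var t Target
//	_ = tr.Decode(&t) // /a on the first call, /b on the second, then wraps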
// ReadAllTargets eagerly reads all Targets out of the provided Targeter.
func ReadAllTargets(t Targeter) (tgts []Target, err error) {
for {
var tgt Target
if err = t(&tgt); err == ErrNoTargets {
break
} else if err != nil {
return nil, err
}
tgts = append(tgts, tgt)
}
if len(tgts) == 0 {
return nil, ErrNoTargets
}
return tgts, nil
}
// NewHTTPTargeter returns a new Targeter that decodes one Target from the
// given io.Reader on every invocation. The format is as follows:
//
// GET https://foo.bar/a/b/c
// Header-X: 123
// Header-Y: 321
// @/path/to/body/file
//
// POST https://foo.bar/b/c/a
// Header-X: 123
//
// body will be set as the Target's body if no body is provided.
// hdr will be merged with each Target's headers.
func NewHTTPTargeter(src io.Reader, body []byte, hdr http.Header) Targeter {
var mu sync.Mutex
sc := peekingScanner{src: bufio.NewScanner(src)}
return func(tgt *Target) (err error) {
mu.Lock()
defer mu.Unlock()
if tgt == nil {
return ErrNilTarget
}
var line string
for {
if !sc.Scan() {
return ErrNoTargets
}
line = strings.TrimSpace(sc.Text())
if len(line) != 0 && line[0] != '#' {
break
}
}
tgt.Body = body
tgt.Header = http.Header{}
for k, vs := range hdr {
tgt.Header[k] = vs
}
tokens := strings.SplitN(line, " ", 2)
if len(tokens) < 2 {
return fmt.Errorf("bad target: %s", line)
}
if !startsWithHTTPMethod(line) {
return fmt.Errorf("bad method: %s", tokens[0])
}
tgt.Method = tokens[0]
if _, err = url.ParseRequestURI(tokens[1]); err != nil {
return fmt.Errorf("bad URL: %s", tokens[1])
}
tgt.URL = tokens[1]
line = strings.TrimSpace(sc.Peek())
if line == "" || startsWithHTTPMethod(line) {
return nil
}
for sc.Scan() {
if line = strings.TrimSpace(sc.Text()); line == "" {
break
} else if strings.HasPrefix(line, "@") {
if tgt.Body, err = ioutil.ReadFile(line[1:]); err != nil {
return fmt.Errorf("bad body: %s", err)
}
break
}
tokens = strings.SplitN(line, ":", 2)
if len(tokens) < 2 {
return fmt.Errorf("bad header: %s", line)
}
for i := range tokens {
if tokens[i] = strings.TrimSpace(tokens[i]); tokens[i] == "" {
return fmt.Errorf("bad header: %s", line)
}
}
// Add key/value directly to the http.Header (map[string][]string).
// http.Header.Add() canonicalizes keys but vegeta is used
// to test systems that require case-sensitive headers.
tgt.Header[tokens[0]] = append(tgt.Header[tokens[0]], tokens[1])
}
if err = sc.Err(); err != nil {
return ErrNoTargets
}
return nil
}
}
var httpMethodChecker = regexp.MustCompile("^[A-Z]+\\s")
// A line starts with an http method when the first word is uppercase ascii
// followed by a space.
func startsWithHTTPMethod(t string) bool {
return httpMethodChecker.MatchString(t)
}
// Wrap a Scanner so we can cheat and look at the next value and react accordingly,
// but still have it be around the next time we Scan() + Text()
type peekingScanner struct {
src *bufio.Scanner
peeked string
}
func (s *peekingScanner) Err() error {
return s.src.Err()
}
func (s *peekingScanner) Peek() string {
if !s.src.Scan() {
return ""
}
s.peeked = s.src.Text()
return s.peeked
}
func (s *peekingScanner) Scan() bool {
if s.peeked == "" {
return s.src.Scan()
}
return true
}
func (s *peekingScanner) Text() string {
if s.peeked == "" {
return s.src.Text()
}
t := s.peeked
s.peeked = ""
return t
}

@@ -0,0 +1,167 @@
// This file has been modified from the original generated code to make it work with
// the defined type jsonTarget so that the methods aren't exposed on Target.
package vegeta
import (
http "net/http"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
type jsonTarget Target
func (t *jsonTarget) decode(in *jlexer.Lexer) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeString()
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "method":
t.Method = string(in.String())
case "url":
t.URL = string(in.String())
case "body":
if in.IsNull() {
in.Skip()
t.Body = nil
} else {
t.Body = in.Bytes()
}
case "header":
if in.IsNull() {
in.Skip()
} else {
in.Delim('{')
if !in.IsDelim('}') {
t.Header = make(http.Header)
} else {
t.Header = nil
}
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()
var v2 []string
if in.IsNull() {
in.Skip()
v2 = nil
} else {
in.Delim('[')
if v2 == nil {
if !in.IsDelim(']') {
v2 = make([]string, 0, 4)
} else {
v2 = []string{}
}
} else {
v2 = (v2)[:0]
}
for !in.IsDelim(']') {
var v3 string
v3 = string(in.String())
v2 = append(v2, v3)
in.WantComma()
}
in.Delim(']')
}
(t.Header)[key] = v2
in.WantComma()
}
in.Delim('}')
}
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func (t jsonTarget) encode(out *jwriter.Writer) {
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"method\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(t.Method))
}
{
const prefix string = ",\"url\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(t.URL))
}
if len(t.Body) != 0 {
const prefix string = ",\"body\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Base64Bytes(t.Body)
}
if len(t.Header) != 0 {
const prefix string = ",\"header\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('{')
v6First := true
for v6Name, v6Value := range t.Header {
if v6First {
v6First = false
} else {
out.RawByte(',')
}
out.String(string(v6Name))
out.RawByte(':')
if v6Value == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 {
out.RawString("null")
} else {
out.RawByte('[')
for v7, v8 := range v6Value {
if v7 > 0 {
out.RawByte(',')
}
out.String(string(v8))
}
out.RawByte(']')
}
}
out.RawByte('}')
}
}
out.RawByte('}')
}