Change the service profile assertion conditions (#2785)

With the server configured to respond with a 50% failure rate, the test first
checks that the actual success rate is less than 100%. The service profile is
then edited to mark the route as retryable, and the test checks that the
effective success rate is at least 95%.

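For reference, the route the test marks retryable ends up looking roughly like
this in the applied ServiceProfile (a sketch only — the profile is generated
from testdata/world.swagger at runtime, so the fully-qualified name shown here
is hypothetical and the apiVersion may differ by release):

apiVersion: linkerd.io/v1alpha2
kind: ServiceProfile
metadata:
  # hypothetical name; the test namespace is generated at runtime
  name: world-svc.serviceprofile-test.svc.cluster.local
spec:
  routes:
  - name: GET /testpath
    condition:
      method: GET
      pathRegex: /testpath
    isRetryable: true  # the one field the test flips to enable retries
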
This is (hopefully) more reliable than having the test wait and retry until
the effective success rate diverges from the actual success rate before
comparing the two.

Signed-off-by: Ivan Sim <ivan@buoyant.io>
Ivan Sim 2019-05-03 08:37:48 -07:00 committed by GitHub
parent 7829fcc28d
commit 00a94be073
3 changed files with 91 additions and 200 deletions


@@ -123,96 +123,82 @@ func TestServiceProfiles(t *testing.T) {
 }
 
 func TestServiceProfileMetrics(t *testing.T) {
-    testNamespace := TestHelper.GetTestNamespace("serviceprofile-test")
-    testCases := []string{
-        "retries",
-        "latency",
-    }
-    for _, tc := range testCases {
-        var (
-            tc                   = tc
-            testSP               = fmt.Sprintf("world-%s-svc", tc)
-            testDownstreamDeploy = fmt.Sprintf("deployment/world-%s", tc)
-            testUpstreamDeploy   = fmt.Sprintf("deployment/hello-%s", tc)
-            testYAML             = fmt.Sprintf("testdata/hello_world_%s.yaml", tc)
-        )
-        t.Run(tc, func(t *testing.T) {
-            out, stderr, err := TestHelper.LinkerdRun("inject", "--manual", testYAML)
-            if err != nil {
-                t.Errorf("'linkerd %s' command failed with %s: %s\n", "inject", err.Error(), stderr)
-            }
-            out, err = TestHelper.KubectlApply(out, testNamespace)
-            if err != nil {
-                t.Errorf("kubectl apply command failed\n%s", out)
-            }
-            cmd := []string{
-                "profile",
-                "--namespace",
-                testNamespace,
-                "--open-api",
-                "testdata/world.swagger",
-                testSP,
-            }
-            out, stderr, err = TestHelper.LinkerdRun(cmd...)
-            if err != nil {
-                t.Errorf("'linkerd %s' command failed with %s: %s\n", cmd, err.Error(), stderr)
-            }
-            _, err = TestHelper.KubectlApply(out, testNamespace)
-            if err != nil {
-                t.Errorf("kubectl apply command failed:\n%s", err)
-            }
-            assertRouteStat(testUpstreamDeploy, testNamespace, testDownstreamDeploy, t, func(stat *cmd2.JSONRouteStats) error {
-                if *stat.EffectiveSuccess != *stat.ActualSuccess {
-                    return fmt.Errorf(
-                        "expected Effective Success to be equal to Actual Success but got: Effective [%f] <> Actual [%f]",
-                        *stat.EffectiveSuccess, *stat.ActualSuccess)
-                }
-                return nil
-            })
-            profile := &sp.ServiceProfile{}
-            // Grab the output and convert it to a service profile object for modification
-            err = yaml.Unmarshal([]byte(out), profile)
-            if err != nil {
-                t.Errorf("unable to unmarshall YAML: %s", err.Error())
-            }
-            for _, route := range profile.Spec.Routes {
-                if route.Name == "GET /testpath" {
-                    route.IsRetryable = true
-                    break
-                }
-            }
-            bytes, err := yaml.Marshal(profile)
-            if err != nil {
-                t.Errorf("error marshalling service profile: %s", bytes)
-            }
-            out, err = TestHelper.KubectlApply(string(bytes), testNamespace)
-            if err != nil {
-                t.Errorf("kubectl apply command failed:\n%s :%s", err, out)
-            }
-            assertRouteStat(testUpstreamDeploy, testNamespace, testDownstreamDeploy, t, func(stat *cmd2.JSONRouteStats) error {
-                if *stat.EffectiveSuccess <= *stat.ActualSuccess {
-                    return fmt.Errorf(
-                        "expected Effective Success to be greater than Actual Success but got: Effective [%f] <> Actual [%f]",
-                        *stat.EffectiveSuccess, *stat.ActualSuccess)
-                }
-                return nil
-            })
-        })
-    }
+    var (
+        testNamespace        = TestHelper.GetTestNamespace("serviceprofile-test")
+        testSP               = "world-svc"
+        testDownstreamDeploy = "deployment/world"
+        testUpstreamDeploy   = "deployment/hello"
+        testYAML             = "testdata/hello_world.yaml"
+    )
+    out, stderr, err := TestHelper.LinkerdRun("inject", "--manual", testYAML)
+    if err != nil {
+        t.Errorf("'linkerd %s' command failed with %s: %s\n", "inject", err.Error(), stderr)
+    }
+    out, err = TestHelper.KubectlApply(out, testNamespace)
+    if err != nil {
+        t.Errorf("kubectl apply command failed\n%s", out)
+    }
+    cmd := []string{
+        "profile",
+        "--namespace",
+        testNamespace,
+        "--open-api",
+        "testdata/world.swagger",
+        testSP,
+    }
+    out, stderr, err = TestHelper.LinkerdRun(cmd...)
+    if err != nil {
+        t.Errorf("'linkerd %s' command failed with %s: %s\n", cmd, err.Error(), stderr)
+    }
+    _, err = TestHelper.KubectlApply(out, testNamespace)
+    if err != nil {
+        t.Errorf("kubectl apply command failed:\n%s", err)
+    }
+    assertRouteStat(testUpstreamDeploy, testNamespace, testDownstreamDeploy, t, func(stat *cmd2.JSONRouteStats) error {
+        if *stat.ActualSuccess == 100.00 {
+            return fmt.Errorf("expected Actual Success to be less than 100%% due to pre-seeded failure rate. But got %0.2f", *stat.ActualSuccess)
+        }
+        return nil
+    })
+    profile := &sp.ServiceProfile{}
+    // Grab the output and convert it to a service profile object for modification
+    err = yaml.Unmarshal([]byte(out), profile)
+    if err != nil {
+        t.Errorf("unable to unmarshal YAML: %s", err.Error())
+    }
+    // introduce retry in the service profile
+    for _, route := range profile.Spec.Routes {
+        if route.Name == "GET /testpath" {
+            route.IsRetryable = true
+            break
+        }
+    }
+    bytes, err := yaml.Marshal(profile)
+    if err != nil {
+        t.Errorf("error marshalling service profile: %s", bytes)
+    }
+    out, err = TestHelper.KubectlApply(string(bytes), testNamespace)
+    if err != nil {
+        t.Errorf("kubectl apply command failed:\n%s :%s", err, out)
+    }
+    assertRouteStat(testUpstreamDeploy, testNamespace, testDownstreamDeploy, t, func(stat *cmd2.JSONRouteStats) error {
+        if *stat.EffectiveSuccess < 0.95 {
+            return fmt.Errorf("expected Effective Success to be at least 95%% with retries enabled. But got %.2f", *stat.EffectiveSuccess)
+        }
+        return nil
+    })
 }
 
 func assertRouteStat(upstream, namespace, downstream string, t *testing.T, assertFn func(stat *cmd2.JSONRouteStats) error) {
@@ -280,6 +266,7 @@ func getRoutes(deployName, namespace string, additionalArgs []string) ([]*cmd2.JSONRouteStats, error) {
     if err != nil {
         return nil, err
     }
+    var list map[string][]*cmd2.JSONRouteStats
     err = yaml.Unmarshal([]byte(out), &list)
     if err != nil {


@@ -1,24 +1,24 @@
 apiVersion: apps/v1beta1
 kind: Deployment
 metadata:
-  name: hello-latency
+  name: hello
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: hello-latency
+      app: hello
   template:
     metadata:
       labels:
-        app: hello-latency
+        app: hello
     spec:
       containers:
-      - name: hello-latency
+      - name: hello
         image: buoyantio/helloworld:0.1.6
         args:
         - "-addr=:8888"
         - "-text=Hello"
-        - "-target=world-latency-svc:8889/testpath"
+        - "-target=world-svc:8889/testpath"
         ports:
         - name: service
           containerPort: 8888
@@ -26,10 +26,10 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: hello-latency-svc
+  name: hello-svc
 spec:
   selector:
-    app: hello-latency
+    app: hello
   ports:
   - name: http
     port: 8888
@@ -38,25 +38,24 @@ spec:
 apiVersion: apps/v1beta1
 kind: Deployment
 metadata:
-  name: world-latency
+  name: world
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: world-latency
+      app: world
   template:
     metadata:
       labels:
-        app: world-latency
+        app: world
     spec:
       containers:
-      - name: world-latency
+      - name: world
         image: buoyantio/helloworld:0.1.6
         args:
         - "-addr=:8889"
         - "-text=World"
-        - "-failure-rate=0.2"
-        - "-latency=3s"
+        - "-failure-rate=0.5"
         ports:
         - name: service
           containerPort: 8889
@@ -64,10 +63,10 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: world-latency-svc
+  name: world-svc
 spec:
   selector:
-    app: world-latency
+    app: world
   ports:
   - name: http
     port: 8889
@@ -76,15 +75,15 @@ spec:
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: hello-latency-slow-cooker
+  name: hello-slow-cooker
 spec:
   template:
     metadata:
       labels:
-        app: hello-latency-slow-cooker
+        app: hello-slow-cooker
     spec:
       containers:
-      - name: hello-latency-slow-cooker
+      - name: hello-slow-cooker
         image: buoyantio/slow_cooker:1.1.1
         command:
         - "/bin/sh"
@@ -92,5 +91,5 @@ spec:
         - "-c"
         - |
           sleep 15 # wait for pods to start
-          slow_cooker -metric-addr 0.0.0.0:9998 http://hello-latency-svc:8888/testpath
+          slow_cooker -metric-addr 0.0.0.0:9998 http://hello-svc:8888/testpath
       restartPolicy: OnFailure


@@ -1,95 +0,0 @@
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: hello-retries
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: hello-retries
-  template:
-    metadata:
-      labels:
-        app: hello-retries
-    spec:
-      containers:
-      - name: hello-retries
-        image: buoyantio/helloworld:0.1.6
-        args:
-        - "-addr=:7777"
-        - "-text=Hello"
-        - "-target=world-retries-svc:7778/testpath"
-        ports:
-        - name: service
-          containerPort: 7777
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: hello-retries-svc
-spec:
-  selector:
-    app: hello-retries
-  ports:
-  - name: http
-    port: 7777
-    targetPort: 7777
----
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: world-retries
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: world-retries
-  template:
-    metadata:
-      labels:
-        app: world-retries
-    spec:
-      containers:
-      - name: world-retries
-        image: buoyantio/helloworld:0.1.6
-        args:
-        - "-addr=:7778"
-        - "-text=World"
-        - "-failure-rate=0.5"
-        ports:
-        - name: service
-          containerPort: 7778
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: world-retries-svc
-spec:
-  selector:
-    app: world-retries
-  ports:
-  - name: http
-    port: 7778
-    targetPort: 7778
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: hello-retries-slow-cooker
-spec:
-  template:
-    metadata:
-      labels:
-        app: hello-retries-slow-cooker
-    spec:
-      containers:
-      - name: hello-retries-slow-cooker
-        image: buoyantio/slow_cooker:1.1.1
-        command:
-        - "/bin/sh"
-        args:
-        - "-c"
-        - |
-          sleep 15 # wait for pods to start
-          slow_cooker -metric-addr 0.0.0.0:9998 http://hello-retries-svc:7777/testpath
-      restartPolicy: OnFailure