Update docker-compose environment to match prod (#609)

The Prometheus config in the docker-compose environment had fallen
behind the prod setup.

This change updates the docker-compose environment in the following
ways:
- Prometheus config more closely matches prod, based on #583
- simulate-proxy labels match prod, based on #605
- add Grafana container

Signed-off-by: Andrew Seigner <siggy@buoyant.io>
This commit is contained in:
Andrew Seigner 2018-03-23 17:00:39 -07:00 committed by GitHub
parent 291d8e97ab
commit 12c6531546
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 96 additions and 72 deletions

View File

@@ -4,37 +4,28 @@ global:
scrape_interval: 10s
scrape_configs:
- job_name: 'prometheus'
- job_name: 'conduit-controller'
static_configs:
- targets: ['localhost:9090']
- targets:
- 'destination:9999'
- 'prometheus:9090'
- 'proxy-api:9996'
- 'public-api:9995'
- 'tap:9998'
- 'telemetry:9997'
- 'web:9994'
relabel_configs:
- action: labelmap
regex: __address__
replacement: component
- action: replace
source_labels: [component]
regex: ^(.*):.*$
target_label: component
- job_name: 'web'
- job_name: 'conduit-proxy'
static_configs:
- targets: ['web:9994']
- job_name: 'public-api'
static_configs:
- targets: ['public-api:9995']
- job_name: 'proxy-api'
static_configs:
- targets: ['proxy-api:9996']
- job_name: 'telemetry'
static_configs:
- targets: ['telemetry:9997']
- job_name: 'tap'
static_configs:
- targets: ['tap:9998']
- job_name: 'destination'
static_configs:
- targets: ['destination:9999']
- job_name: 'conduit'
static_configs:
- targets:
- 'simulate-proxy:9000'
- 'simulate-proxy:9001'
- 'simulate-proxy:9002'
- targets:
- 'simulate-proxy:9000'
- 'simulate-proxy:9001'
- 'simulate-proxy:9002'

View File

@@ -41,7 +41,33 @@ type proxyMetricCollectors struct {
}
var (
labels = generatePromLabels()
// for reference: https://github.com/runconduit/conduit/blob/master/doc/proxy-metrics.md#labels
labels = []string{
// kubeResourceTypes
"k8s_daemon_set",
"k8s_deployment",
"k8s_job",
"k8s_replication_controller",
"k8s_replica_set",
"k8s_pod_template_hash",
"namespace",
// constantLabels
"direction",
"authority",
"status_code",
"grpc_status_code",
// destinationLabels
"dst_daemon_set",
"dst_deployment",
"dst_job",
"dst_replication_controller",
"dst_replica_set",
"dst_namespace",
}
grpcResponseCodes = []codes.Code{
codes.OK,
codes.PermissionDenied,
@@ -170,10 +196,10 @@ func (s *simulatedProxy) generateProxyTraffic() {
// newConduitLabel creates a label map to be used for metric generation.
func (s *simulatedProxy) newConduitLabel(destinationPod string, isResponseLabel bool) prom.Labels {
labelMap := prom.Labels{
"direction": randomRequestDirection(),
"deployment": s.deploymentName,
"authority": "world.greeting:7778",
"namespace": s.namespace,
"direction": randomRequestDirection(),
"k8s_deployment": s.deploymentName,
"authority": "world.greeting:7778",
"namespace": s.namespace,
}
if labelMap["direction"] == "outbound" {
labelMap["dst_deployment"] = destinationPod
@@ -209,30 +235,6 @@ func randomRequestDirection() string {
return "outbound"
}
func generatePromLabels() []string {
kubeResourceTypes := []string{
"job",
"replica_set",
"deployment",
"daemon_set",
"replication_controller",
"namespace",
}
constantLabels := []string{
"direction",
"authority",
"status_code",
"grpc_status_code",
}
destinationLabels := make([]string, len(kubeResourceTypes))
for i, label := range kubeResourceTypes {
destinationLabels[i] = fmt.Sprintf("dst_%s", label)
}
return append(append(constantLabels, kubeResourceTypes...), destinationLabels...)
}
// overrideDefaultLabels combines two maps of the same size with the keys
// map1 values take precedence during the union
func overrideDefaultLabels(map1 map[string]string) map[string]string {
@@ -412,18 +414,14 @@ func main() {
randomPodOwner := getRandomDeployment(deployments, excludedDeployments)
excludedDeployments[randomPodOwner] = struct{}{}
go func(address string, podOwner string, deployments []string) {
proxy := newSimulatedProxy(podOwner, deployments, sleep)
server := &http.Server{
Addr: address,
Handler: promhttp.HandlerFor(proxy.registerer, promhttp.HandlerOpts{}),
}
log.Infof("serving scrapable metrics on %s", address)
go server.ListenAndServe()
go proxy.generateProxyTraffic()
}(addr, randomPodOwner, deployments)
proxy := newSimulatedProxy(randomPodOwner, deployments, sleep)
server := &http.Server{
Addr: addr,
Handler: promhttp.HandlerFor(proxy.registerer, promhttp.HandlerOpts{}),
}
log.Infof("serving scrapable metrics on %s", addr)
go server.ListenAndServe()
go proxy.generateProxyTraffic()
}
<-stopCh
}

View File

@@ -111,6 +111,15 @@ services:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.retention=6h
grafana:
image: grafana/grafana:5.0.3
ports:
- 3000:3000
volumes:
# TODO: find a way to share the dashboard json files, currently in cli/install/*.go
- ./grafana/dev.grafana.ini:/etc/grafana/grafana.ini:ro
- ./grafana/dev.datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:ro
simulate-proxy:
image: golang:1.10.0-alpine3.7
ports:

View File

@@ -0,0 +1,12 @@
apiVersion: 1
datasources:
- name: prometheus
type: prometheus
access: proxy
orgId: 1
url: http://prometheus:9090
isDefault: true
jsonData:
timeInterval: "5s"
version: 1
editable: true

14
grafana/dev.grafana.ini Normal file
View File

@@ -0,0 +1,14 @@
instance_name = conduit-grafana
[auth]
disable_login_form = true
[auth.anonymous]
enabled = true
org_role = Editor
[auth.basic]
enabled = false
[analytics]
check_for_updates = false