Compare commits

16 commits:

| SHA1 |
|---|
| a31f41ba2e |
| 798c568c1e |
| 309d461899 |
| e307ceed02 |
| b5f0227e4b |
| 6818bcd0f2 |
| 04d66b9640 |
| 62a81cbb7e |
| a99be7183c |
| d591dcf372 |
| 72aa97f41b |
| 833ff93c57 |
| d305b2e6f1 |
| ee2afb33be |
| 8cc5cc0bfa |
| 0f9c6a8413 |
@@ -1,10 +1,36 @@
-name: Test, format and lint
+name: Build, test, format and lint
 
 on:
   push:
   pull_request:
 
 jobs:
+  build-binaries:
+    runs-on: ubuntu-latest
+    name: Build binary for ${{ matrix.platform.name }}
+    strategy:
+      matrix:
+        platform:
+          - name: linux on amd64
+            task: build-linux-amd64
+          - name: linux on arm64
+            task: build-linux-arm64
+          - name: osx on amd64
+            task: build-darwin-amd64
+          - name: osx on arm64
+            task: build-darwin-arm64
+    steps:
+      - uses: actions/checkout@v2.3.4
+        with:
+          submodules: true
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: ^1.19
+
+      - name: Build binaries
+        run: make ${{ matrix.platform.task }}
+
   format-lint:
     runs-on: ubuntu-latest
     name: Format and lint
@@ -15,7 +41,7 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.17
+          go-version: ^1.19
 
       - name: Install tools
        run: make install-tools
@@ -36,7 +62,7 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.17
+          go-version: ^1.19
 
       - name: Run e2e test
         run: make e2e-tests
@@ -51,7 +77,7 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.17
+          go-version: ^1.19
 
       - name: Run unit test
         run: make test
@@ -66,10 +92,10 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.17
+          go-version: ^1.19
 
       - name: Setup database
-        run: docker run --rm -d -p9000:9000 --name test-clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:21
+        run: docker run --rm -d -p9000:9000 --name test-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
 
       - name: Run integration tests
         run: make integration-test
@@ -15,10 +15,10 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.17
+          go-version: ^1.19
 
       - name: Create release distribution
-        run: make build tar
+        run: make build-all-platforms tar-all-platforms
 
       - name: Log in to the Container registry
         uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
@@ -125,6 +125,10 @@ issues:
     - text: "G402:"
       linters:
         - gosec
+    - path: grpc_test.go
+      linters:
+        # See https://github.com/golangci/golangci-lint/issues/2286
+        - typecheck
 
   # The list of ids of default excludes to include or disable. By default it's empty.
   # See the list of default excludes here https://golangci-lint.run/usage/configuration.
@@ -1,4 +1,4 @@
-FROM alpine:3.13
+FROM docker.io/library/alpine:3.16
 
 ADD jaeger-clickhouse-linux-amd64 /go/bin/jaeger-clickhouse
 
Makefile (46 changes)

@@ -3,7 +3,7 @@ GOARCH ?= $(shell go env GOARCH)
 GOBUILD=CGO_ENABLED=0 installsuffix=cgo go build -trimpath
 
 TOOLS_MOD_DIR = ./internal/tools
-JAEGER_VERSION ?= 1.24.0
+JAEGER_VERSION ?= 1.32.0
 
 DOCKER_REPO ?= ghcr.io/jaegertracing/jaeger-clickhouse
 DOCKER_TAG ?= latest
@@ -12,6 +12,25 @@ DOCKER_TAG ?= latest
 build:
 	${GOBUILD} -o jaeger-clickhouse-$(GOOS)-$(GOARCH) ./cmd/jaeger-clickhouse/main.go
 
+.PHONY: build-linux-amd64
+build-linux-amd64:
+	GOOS=linux GOARCH=amd64 $(MAKE) build
+
+.PHONY: build-linux-arm64
+build-linux-arm64:
+	GOOS=linux GOARCH=arm64 $(MAKE) build
+
+.PHONY: build-darwin-amd64
+build-darwin-amd64:
+	GOOS=darwin GOARCH=amd64 $(MAKE) build
+
+.PHONY: build-darwin-arm64
+build-darwin-arm64:
+	GOOS=darwin GOARCH=arm64 $(MAKE) build
+
+.PHONY: build-all-platforms
+build-all-platforms: build-linux-amd64 build-linux-arm64 build-darwin-amd64 build-darwin-arm64
+
 .PHONY: e2e-tests
 e2e-tests:
 	GOOS=linux GOARCH=amd64 $(MAKE) build
@@ -28,11 +47,11 @@ run-hotrod:
 .PHONY: fmt
 fmt:
 	go fmt ./...
 	goimports -w -local github.com/jaegertracing/jaeger-clickhouse ./
 
 .PHONY: lint
 lint:
-	golangci-lint run --allow-parallel-runners ./...
+	golangci-lint -v run --allow-parallel-runners ./...
 
 .PHONY: test
 test:
@@ -42,13 +61,32 @@ test:
 integration-test: build
 	STORAGE=grpc-plugin \
 	PLUGIN_BINARY_PATH=$(PWD)/jaeger-clickhouse-linux-amd64 \
-	PLUGIN_CONFIG_PATH=$(PWD)/integration/integration_config.yaml \
+	PLUGIN_CONFIG_PATH=$(PWD)/integration/config-local.yaml \
 	go test ./integration
 
 .PHONY: tar
 tar:
 	tar -czvf jaeger-clickhouse-$(GOOS)-$(GOARCH).tar.gz jaeger-clickhouse-$(GOOS)-$(GOARCH) config.yaml
 
+.PHONY: tar-linux-amd64
+tar-linux-amd64:
+	GOOS=linux GOARCH=amd64 $(MAKE) tar
+
+.PHONY: tar-linux-arm64
+tar-linux-arm64:
+	GOOS=linux GOARCH=arm64 $(MAKE) tar
+
+.PHONY: tar-darwin-amd64
+tar-darwin-amd64:
+	GOOS=darwin GOARCH=amd64 $(MAKE) tar
+
+.PHONY: tar-darwin-arm64
+tar-darwin-arm64:
+	GOOS=darwin GOARCH=arm64 $(MAKE) tar
+
+.PHONY: tar-all-platforms
+tar-all-platforms: tar-linux-amd64 tar-linux-arm64 tar-darwin-amd64 tar-darwin-arm64
+
 .PHONY: docker
 docker: build
 	docker build -t ${DOCKER_REPO}:${DOCKER_TAG} -f Dockerfile .
README.md (29 changes)

@@ -1,23 +1,24 @@
-# Jaeger ClickHouse
+# Jaeger ClickHouse (experimental)
 
-This is implementation of Jaeger's [storage plugin](https://github.com/jaegertracing/jaeger/tree/master/plugin/storage/grpc) for ClickHouse.
-See as well [jaegertracing/jaeger/issues/1438](https://github.com/jaegertracing/jaeger/issues/1438) for historical discussion regarding Clickhouse plugin.
+⚠️ This module only implements grpc-plugin API that has been deprecated in Jaeger (https://github.com/jaegertracing/jaeger/issues/4647).
+
+This is a [Jaeger gRPC storage plugin](https://github.com/jaegertracing/jaeger/tree/master/plugin/storage/grpc) implementation for storing traces in ClickHouse.
 
 ## Project status
 
-Jaeger ClickHouse is a community-driven project, we would love to hear your feature requests.
-Pull requests also will be greatly appreciated.
+This is a community-driven project, and we would love to hear your issues and feature requests.
+Pull requests are also greatly appreciated.
 
 ## Why use ClickHouse for Jaeger?
 
-[ClickHouse](https://github.com/clickhouse/clickhouse) is an analytical column-oriented database management system.
-It is designed to analyze streams of clicks which are kind of resemblant to spans.
+[ClickHouse](https://clickhouse.com) is an analytical column-oriented database management system.
+It is designed to analyze streams of events which are kind of resemblant to spans.
 It's open-source, optimized for performance, and actively developed.
 
-## How it works?
+## How it works
 
-Jaeger spans are stored in 2 tables. First one contains whole span encoded either in JSON or Protobuf.
-Second stores key information about spans for searching. This table is indexed by span duration and tags.
+Jaeger spans are stored in 2 tables. The first contains the whole span encoded either in JSON or Protobuf.
+The second stores key information about spans for searching. This table is indexed by span duration and tags.
 Also, info about operations is stored in the materialized view. There are not indexes for archived spans.
 Storing data in replicated local tables with distributed global tables is natively supported. Spans are bufferized.
 Span buffers are flushed to DB either by timer or after reaching max batch size. Timer interval and batch size can be
@@ -41,7 +42,7 @@ Refer to the [config.yaml](./config.yaml) for all supported configuration option
 ### Docker database example
 
 ```bash
-docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:21
+docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
 GOOS=linux make build run
 make run-hotrod
 ```
@@ -50,7 +51,7 @@ Open [localhost:16686](http://localhost:16686) and [localhost:8080](http://local
 
 ### Custom database
 
-You need to specify connection options in config.yaml file, then you can run
+You need to specify connection options in `config.yaml`, then you can run
 
 ```bash
 make build
@@ -59,4 +60,6 @@ SPAN_STORAGE_TYPE=grpc-plugin {Jaeger binary adress} --query.ui-config=jaeger-ui
 
 ## Credits
 
-This project is based on https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse.
+This project is originally based on [this clickhouse plugin implementation](https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse).
+
+See also [jaegertracing/jaeger/issues/1438](https://github.com/jaegertracing/jaeger/issues/1438) for historical discussion regarding the implementation of a ClickHouse plugin.
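The README hunk above describes spans being buffered and flushed to the database either when a timer fires or when a maximum batch size is reached. For readers unfamiliar with that pattern, here is a minimal Go sketch of the flush policy. The names (`SpanBuffer`, `flush`) are hypothetical and are not the plugin's actual types; this only illustrates the timer-or-batch-size rule the README states.

```go
package main

import (
	"fmt"
	"time"
)

// SpanBuffer illustrates the policy described in the README: spans
// accumulate, and the buffer is flushed either when the batch reaches a
// maximum size or when a timer fires, whichever happens first.
type SpanBuffer struct {
	spans    []string // stand-in for real span objects
	maxBatch int      // flush when this many spans are pending
	incoming chan string
	ticker   *time.Ticker // flush on this interval even if the batch is small
}

func (b *SpanBuffer) run() {
	for {
		select {
		case span := <-b.incoming:
			b.spans = append(b.spans, span)
			if len(b.spans) >= b.maxBatch {
				b.flush("batch size reached")
			}
		case <-b.ticker.C:
			if len(b.spans) > 0 {
				b.flush("timer fired")
			}
		}
	}
}

func (b *SpanBuffer) flush(reason string) {
	// A real implementation would issue one batched INSERT to ClickHouse here.
	fmt.Printf("flushing %d spans (%s)\n", len(b.spans), reason)
	b.spans = b.spans[:0]
}

func main() {
	b := &SpanBuffer{maxBatch: 2, incoming: make(chan string), ticker: time.NewTicker(time.Second)}
	go b.run()
	b.incoming <- "span-1"
	b.incoming <- "span-2" // second span triggers a size-based flush
	time.Sleep(1500 * time.Millisecond)
}
```

Both trigger conditions are exactly the two knobs the config exposes (timer interval and batch size), which is why they appear together in the README sentence.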
@ -11,13 +11,13 @@ Pull requests also will be greatly appreciated.
|
|||
|
||||
[ClickHouse](https://github.com/clickhouse/clickhouse) is an analytical column-oriented database management system. It is designed to analyze streams of clicks which are kind of resemblant to spans. It's open-source, optimized for performance, and actively developed.
|
||||
|
||||
## How it works?
|
||||
## How does it work?
|
||||
|
||||
Jaeger spans are stored in 2 tables. First one contains whole span encoded either in JSON or Protobuf.
|
||||
Second stores key information about spans for searching. This table is indexed by span duration and tags.
|
||||
Also, info about operations is stored in the materialized view. There are no indexes for archived spans.
|
||||
Storing data in replicated local tables with distributed global tables is natively supported. Spans are buffered.
|
||||
Span buffers are flushed to DB either by timer or after reaching max batch size.
|
||||
Span buffers are flushed to DB either by timer or after reaching max batch size.
|
||||
Timer interval and batch size can be set in [config file](../config.yaml).
|
||||
|
||||

|
||||
|
@ -48,7 +48,7 @@ Disk usage, [bytes]
|
|||
|
||||
```sql
|
||||
SELECT count()
|
||||
FROM jaeger_index
|
||||
FROM jaeger_index
|
||||
WHERE service = 'tracegen'
|
||||
|
||||
┌──count()─┐
|
||||
|
@ -75,7 +75,7 @@ Refer to the [config.yaml](../config.yaml) for all supported configuration optio
|
|||
### Docker database example
|
||||
|
||||
```bash
|
||||
docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:21
|
||||
docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
|
||||
GOOS=linux make build run
|
||||
make run-hotrod
|
||||
```
|
||||
|
@ -93,4 +93,4 @@ SPAN_STORAGE_TYPE=grpc-plugin {Jaeger binary adress} --query.ui-config=jaeger-ui
|
|||
|
||||
## Credits
|
||||
|
||||
This project is based on https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse.
|
||||
This project is based on https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse.
|
||||
|
|
|
@@ -2,7 +2,6 @@ package main
 
 import (
 	"flag"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path/filepath"
@@ -10,11 +9,11 @@ import (
 	// Package contains time zone info for connecting to ClickHouse servers with non-UTC time zone
 	_ "time/tzdata"
 
-	"github.com/hashicorp/go-hclog"
+	hclog "github.com/hashicorp/go-hclog"
 	"github.com/jaegertracing/jaeger/plugin/storage/grpc"
 	"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"gopkg.in/yaml.v3"
+	yaml "gopkg.in/yaml.v3"
 
 	"github.com/jaegertracing/jaeger-clickhouse/storage"
 )
@@ -32,7 +31,7 @@ func main() {
 		JSONFormat: true,
 	})
 
-	cfgFile, err := ioutil.ReadFile(filepath.Clean(configPath))
+	cfgFile, err := os.ReadFile(filepath.Clean(configPath))
 	if err != nil {
 		logger.Error("Could not read config file", "config", configPath, "error", err)
 		os.Exit(1)
@@ -59,6 +58,7 @@ func main() {
 	}
 	pluginServices.Store = store
 	pluginServices.ArchiveStore = store
+	pluginServices.StreamingSpanWriter = store
 
 	grpc.Serve(&pluginServices)
 	if err = store.Close(); err != nil {
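The `ioutil.ReadFile` to `os.ReadFile` change above follows the deprecation of the `io/ioutil` package since Go 1.16; the two calls are drop-in equivalents. A minimal standalone sketch of the migrated read (the `config.yaml` path is just an example):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.ReadFile is the direct replacement for the deprecated
	// ioutil.ReadFile; filepath.Clean guards against sloppy paths,
	// mirroring the call in main.go above.
	cfgFile, err := os.ReadFile(filepath.Clean("config.yaml"))
	if err != nil {
		fmt.Fprintln(os.Stderr, "could not read config file:", err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(cfgFile))
}
```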
config.yaml (14 changes)

@@ -1,6 +1,13 @@
-address: tcp://some-clickhouse-server:9000
-# When empty the embedded scripts from sqlscripts directory are used
+address: some-clickhouse-server:9000
+# Directory with .sql files to run at plugin startup, mainly for integration tests.
+# Depending on the value of "init_tables", this can be run as a
+# replacement or supplement to creating default tables for span storage.
+# If init_tables is also enabled, the scripts in this directory will be run first.
 init_sql_scripts_dir:
+# Whether to automatically attempt to create tables in ClickHouse.
+# By default, this is enabled if init_sql_scripts_dir is empty,
+# or disabled if init_sql_scripts_dir is provided.
+init_tables:
 # Maximal amount of spans that can be pending writes at a time.
 # New spans exceeding this limit will be discarded,
 # keeping memory in check if there are issues writing to ClickHouse.
@@ -21,6 +28,9 @@ username:
 password:
 # ClickHouse database name. The database must be created manually before Jaeger starts. Default is "default".
 database:
+# If non-empty, enables a tenant column in tables, and uses the provided tenant name for this instance.
+# Default is empty. See guide-multitenancy.md for more information.
+tenant:
 # Endpoint for serving prometheus metrics. Default localhost:9090.
 metrics_endpoint: localhost:9090
 # Whether to use sql scripts supporting replication and sharding.
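The new comments pin down a defaulting rule: `init_tables` is enabled by default only when `init_sql_scripts_dir` is empty, and an explicit value overrides that. A hedged Go sketch of that rule as stated in the comments (the struct and function names are illustrative, not the plugin's actual config code):

```go
package main

import "fmt"

// Config mirrors the two YAML keys discussed above; illustrative only.
type Config struct {
	InitSQLScriptsDir string // init_sql_scripts_dir
	InitTables        *bool  // init_tables; nil means "not set"
}

// shouldInitTables applies the documented default: table creation is on
// when init_sql_scripts_dir is empty and off when it is provided, unless
// init_tables is set explicitly.
func shouldInitTables(c Config) bool {
	if c.InitTables != nil {
		return *c.InitTables
	}
	return c.InitSQLScriptsDir == ""
}

func main() {
	on := true
	fmt.Println(shouldInitTables(Config{}))                                           // true: no scripts dir
	fmt.Println(shouldInitTables(Config{InitSQLScriptsDir: "init_sql_scripts"}))      // false: scripts dir given
	fmt.Println(shouldInitTables(Config{InitSQLScriptsDir: "x", InitTables: &on}))    // true: explicit override
}
```

This matches the integration config added later in this compare, which sets both `init_sql_scripts_dir` and `init_tables: true` so the scripts run first and the default tables are still created.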
@@ -0,0 +1,47 @@
+<!-- Minimal configuration to enable cluster mode in a single clickhouse process -->
+<yandex>
+    <macros>
+        <installation>cluster</installation>
+        <all-sharded-shard>0</all-sharded-shard>
+        <cluster>cluster</cluster>
+        <shard>0</shard>
+        <replica>cluster-0-0</replica>
+    </macros>
+
+    <remote_servers>
+        <cluster>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>localhost</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster>
+    </remote_servers>
+
+    <keeper_server>
+        <tcp_port>2181</tcp_port>
+        <server_id>0</server_id>
+        <log_storage_path>/var/log/clickhouse-server/coordination/log</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+        <raft_configuration>
+            <server>
+                <id>0</id>
+                <hostname>localhost</hostname>
+                <port>9444</port>
+            </server>
+        </raft_configuration>
+    </keeper_server>
+
+    <zookeeper>
+        <!-- Clickhouse Keeper -->
+        <node>
+            <host>localhost</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+    <distributed_ddl>
+        <path>/clickhouse/cluster/task_queue/ddl</path>
+    </distributed_ddl>
+</yandex>
@@ -0,0 +1,4 @@
+address: chi:9000
+tenant: multi1
+# For test purposes flush on every write
+batch_write_size: 1

@@ -0,0 +1,4 @@
+address: chi:9000
+tenant: multi2
+# For test purposes flush on every write
+batch_write_size: 1

@@ -1,3 +1,3 @@
-address: tcp://chi:9000
+address: chi:9000
 # For test purposes flush on every write
 batch_write_size: 1

@@ -0,0 +1,5 @@
+address: chi:9000
+replication: true
+tenant: multi1
+# For test purposes flush on every write
+batch_write_size: 1

@@ -0,0 +1,5 @@
+address: chi:9000
+replication: true
+tenant: multi2
+# For test purposes flush on every write
+batch_write_size: 1

@@ -0,0 +1,4 @@
+address: chi:9000
+replication: true
+# For test purposes flush on every write
+batch_write_size: 1
@@ -2,7 +2,6 @@ package e2etests
 
 import (
 	"context"
-	"database/sql"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -11,17 +10,17 @@ import (
 	"testing"
 	"time"
 
-	_ "github.com/ClickHouse/clickhouse-go" // import driver
+	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
 	"github.com/ecodia/golang-awaitility/awaitility"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/testcontainers/testcontainers-go"
+	testcontainers "github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/wait"
 )
 
 const (
-	clickHouseImage = "yandex/clickhouse-server:21"
-	jaegerImage     = "jaegertracing/all-in-one:1.24.0"
+	clickHouseImage = "clickhouse/clickhouse-server:22"
+	jaegerImage     = "jaegertracing/all-in-one:1.32.0"
 
 	networkName    = "chi-jaeger-test"
 	clickhousePort = "9000/tcp"
@@ -29,11 +28,45 @@ const (
 	jaegerAdminPort = "14269/tcp"
 )
 
+type testCase struct {
+	configs []string
+	chiconf *string
+}
+
 func TestE2E(t *testing.T) {
 	if os.Getenv("E2E_TEST") == "" {
 		t.Skip("Set E2E_TEST=true to run the test")
 	}
 
+	// Minimal additional configuration (config.d) to enable cluster mode
+	chireplconf := "clickhouse-replicated.xml"
+
+	tests := map[string]testCase{
+		"local-single": {
+			configs: []string{"config-local-single.yaml"},
+			chiconf: nil,
+		},
+		"local-multi": {
+			configs: []string{"config-local-multi1.yaml", "config-local-multi2.yaml"},
+			chiconf: nil,
+		},
+		"replication-single": {
+			configs: []string{"config-replication-single.yaml"},
+			chiconf: &chireplconf,
+		},
+		"replication-multi": {
+			configs: []string{"config-replication-multi1.yaml", "config-replication-multi2.yaml"},
+			chiconf: &chireplconf,
+		},
+	}
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			testE2E(t, test)
+		})
+	}
+}
+
+func testE2E(t *testing.T, test testCase) {
 	ctx := context.Background()
 	workingDir, err := os.Getwd()
 	require.NoError(t, err)
@@ -44,12 +77,21 @@ func TestE2E(t *testing.T) {
 	require.NoError(t, err)
 	defer network.Remove(ctx)
 
+	var bindMounts map[string]string
+	if test.chiconf != nil {
+		bindMounts = map[string]string{
+			fmt.Sprintf("%s/%s", workingDir, *test.chiconf): "/etc/clickhouse-server/config.d/testconf.xml",
+		}
+	} else {
+		bindMounts = map[string]string{}
+	}
 	chReq := testcontainers.ContainerRequest{
 		Image:        clickHouseImage,
 		ExposedPorts: []string{clickhousePort},
 		WaitingFor:   &clickhouseWaitStrategy{test: t, pollInterval: time.Millisecond * 200, startupTimeout: time.Minute},
 		Networks:     []string{networkName},
 		Hostname:     "chi",
+		BindMounts:   bindMounts,
 	}
 	chContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
 		ContainerRequest: chReq,
@@ -58,53 +100,61 @@ func TestE2E(t *testing.T) {
 	require.NoError(t, err)
 	defer chContainer.Terminate(ctx)
 
-	jaegerReq := testcontainers.ContainerRequest{
-		Image:        jaegerImage,
-		ExposedPorts: []string{jaegerQueryPort, jaegerAdminPort},
-		WaitingFor:   wait.ForHTTP("/").WithPort(jaegerAdminPort).WithStartupTimeout(time.Second * 10),
-		Env: map[string]string{
-			"SPAN_STORAGE_TYPE": "grpc-plugin",
-		},
-		Cmd: []string{
-			"--grpc-storage-plugin.binary=/project-dir/jaeger-clickhouse-linux-amd64",
-			"--grpc-storage-plugin.configuration-file=/project-dir/e2etests/config.yaml",
-			"--grpc-storage-plugin.log-level=debug",
-		},
-		BindMounts: map[string]string{
-			workingDir + "/..": "/project-dir",
-		},
-		Networks: []string{networkName},
-	}
-	jaegerContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
-		ContainerRequest: jaegerReq,
-		Started:          true,
-	})
-	require.NoError(t, err)
-	defer func() {
-		logs, errLogs := jaegerContainer.Logs(ctx)
-		require.NoError(t, errLogs)
-		all, errLogs := ioutil.ReadAll(logs)
-		require.NoError(t, errLogs)
-		fmt.Printf("Jaeger logs:\n---->\n%s<----\n\n", string(all))
-		jaegerContainer.Terminate(ctx)
-	}()
+	jaegerContainers := make([]testcontainers.Container, 0)
+	for _, pluginConfig := range test.configs {
+		jaegerReq := testcontainers.ContainerRequest{
+			Image:        jaegerImage,
+			ExposedPorts: []string{jaegerQueryPort, jaegerAdminPort},
+			WaitingFor:   wait.ForHTTP("/").WithPort(jaegerAdminPort).WithStartupTimeout(time.Second * 10),
+			Env: map[string]string{
+				"SPAN_STORAGE_TYPE": "grpc-plugin",
+			},
+			Cmd: []string{
+				"--grpc-storage-plugin.binary=/project-dir/jaeger-clickhouse-linux-amd64",
+				fmt.Sprintf("--grpc-storage-plugin.configuration-file=/project-dir/e2etests/%s", pluginConfig),
+				"--grpc-storage-plugin.log-level=debug",
+			},
+			BindMounts: map[string]string{
+				workingDir + "/..": "/project-dir",
+			},
+			Networks: []string{networkName},
+		}
+		// Call Start() manually here so that if it fails then we can still access the logs.
+		jaegerContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+			ContainerRequest: jaegerReq,
+		})
+		require.NoError(t, err)
+		defer func() {
+			logs, errLogs := jaegerContainer.Logs(ctx)
+			require.NoError(t, errLogs)
+			all, errLogs := ioutil.ReadAll(logs)
+			require.NoError(t, errLogs)
+			fmt.Printf("Jaeger logs:\n---->\n%s<----\n\n", string(all))
+			jaegerContainer.Terminate(ctx)
+		}()
+		err = jaegerContainer.Start(ctx)
+		require.NoError(t, err)
+
+		jaegerContainers = append(jaegerContainers, jaegerContainer)
+	}
 
 	chContainer.MappedPort(ctx, clickhousePort)
-	jaegerQueryPort, err := jaegerContainer.MappedPort(ctx, jaegerQueryPort)
-	require.NoError(t, err)
+	for _, jaegerContainer := range jaegerContainers {
+		jaegerQueryPort, err := jaegerContainer.MappedPort(ctx, jaegerQueryPort)
+		require.NoError(t, err)
 
-	err = awaitility.Await(100*time.Millisecond, time.Second*3, func() bool {
-		// Jaeger traces itself so this request generates some spans
-		response, errHTTP := http.Get(fmt.Sprintf("http://localhost:%d/api/services", jaegerQueryPort.Int()))
-		require.NoError(t, errHTTP)
-		body, errHTTP := ioutil.ReadAll(response.Body)
-		require.NoError(t, errHTTP)
-		var r result
-		errHTTP = json.Unmarshal(body, &r)
-		require.NoError(t, errHTTP)
-		return len(r.Data) == 1 && r.Data[0] == "jaeger-query"
-	})
-	assert.NoError(t, err)
+		err = awaitility.Await(100*time.Millisecond, time.Second*3, func() bool {
+			// Jaeger traces itself so this request generates some spans
+			response, errHTTP := http.Get(fmt.Sprintf("http://localhost:%d/api/services", jaegerQueryPort.Int()))
+			require.NoError(t, errHTTP)
+			body, errHTTP := ioutil.ReadAll(response.Body)
+			require.NoError(t, errHTTP)
+			var r result
+			errHTTP = json.Unmarshal(body, &r)
+			require.NoError(t, errHTTP)
+			return len(r.Data) == 1 && r.Data[0] == "jaeger-query"
+		})
+		assert.NoError(t, err)
+	}
 }
 
 type result struct {
@@ -125,7 +175,18 @@ func (c *clickhouseWaitStrategy) WaitUntilReady(ctx context.Context, target wait
 
 	port, err := target.MappedPort(ctx, clickhousePort)
 	require.NoError(c.test, err)
-	db, err := sql.Open("clickhouse", fmt.Sprintf("tcp://localhost:%d?database=default", port.Int()))
+
+	db := clickhouse.OpenDB(&clickhouse.Options{
+		Addr: []string{
+			fmt.Sprintf("localhost:%d", port.Int()),
+		},
+		Auth: clickhouse.Auth{
+			Database: "default",
+		},
+		Compression: &clickhouse.Compression{
+			Method: clickhouse.CompressionLZ4,
+		},
+	})
 	require.NoError(c.test, err)
 
 	for {
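The wait strategy above switches from `sql.Open` with a DSN to `clickhouse.OpenDB` from clickhouse-go v2, and the polling loop body is cut off in this hunk. As a rough idea of what such a readiness poll typically looks like with the same driver, here is a hedged Go sketch; the ping-in-a-loop body is an assumption, since the actual loop is not shown in the diff, and the address is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	// clickhouse.OpenDB returns a standard *sql.DB, as in the test above.
	db := clickhouse.OpenDB(&clickhouse.Options{
		Addr: []string{"localhost:9000"}, // illustrative address
		Auth: clickhouse.Auth{Database: "default"},
	})

	// Poll until the server answers or the deadline passes. The test's
	// actual loop is truncated in the hunk, so this is only a plausible
	// shape for a readiness check, not the plugin's real code.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	for {
		if err := db.PingContext(ctx); err == nil {
			fmt.Println("clickhouse is ready")
			return
		}
		select {
		case <-ctx.Done():
			fmt.Println("gave up waiting:", ctx.Err())
			return
		case <-time.After(200 * time.Millisecond):
		}
	}
}
```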
embed.go (7 changes)

@@ -2,8 +2,5 @@ package jaegerclickhouse
 
 import "embed"
 
-//go:embed sqlscripts/local/*
-var EmbeddedFilesNoReplication embed.FS
-
-//go:embed sqlscripts/replication/*
-var EmbeddedFilesReplication embed.FS
+//go:embed sqlscripts/*
+var SQLScripts embed.FS
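Consolidating the two `embed.FS` variables into one means callers now pick the subdirectory at runtime instead of choosing between two compile-time variables. A minimal sketch of reading scripts from such an embedded filesystem; the `sqlscripts/local` and `sqlscripts/replication` layout is taken from the old `//go:embed` directives above, and the snippet assumes a `sqlscripts/` directory exists alongside the file:

```go
package main

import (
	"embed"
	"fmt"
)

// One directive embeds everything under sqlscripts/, replacing the previous
// pair of variables that embedded sqlscripts/local/ and
// sqlscripts/replication/ separately.
//
//go:embed sqlscripts/*
var sqlScripts embed.FS

func main() {
	// The subdirectory is now a runtime choice; with the old two-variable
	// layout it was baked into which variable you read from.
	dir := "sqlscripts/local" // or "sqlscripts/replication"
	entries, err := sqlScripts.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		data, err := sqlScripts.ReadFile(dir + "/" + e.Name())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d bytes\n", e.Name(), len(data))
	}
}
```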
go.mod (115 changes)

@@ -1,17 +1,32 @@
 module github.com/jaegertracing/jaeger-clickhouse
 
-go 1.17
+go 1.19
 
 require (
+	github.com/ClickHouse/clickhouse-go/v2 v2.3.0
+	github.com/DATA-DOG/go-sqlmock v1.5.0
+	github.com/ecodia/golang-awaitility v0.0.0-20180710094957-fb55e59708c7
+	github.com/gogo/protobuf v1.3.2
+	github.com/hashicorp/go-hclog v1.3.1
+	github.com/jaegertracing/jaeger v1.38.2-0.20221007043206-b4c88ddf6cdd
+	github.com/opentracing/opentracing-go v1.2.0
+	github.com/prometheus/client_golang v1.13.0
+	github.com/stretchr/testify v1.8.0
+	github.com/testcontainers/testcontainers-go v0.11.1
+	go.uber.org/zap v1.23.0
+	gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
-	github.com/ClickHouse/clickhouse-go v1.4.5
-	github.com/DATA-DOG/go-sqlmock v1.5.0
+	github.com/ClickHouse/ch-go v0.47.3 // indirect
 	github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect
 	github.com/Microsoft/hcsshim v0.8.16 // indirect
+	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/benbjohnson/clock v1.3.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
-	github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect
 	github.com/containerd/containerd v1.5.0-beta.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -19,27 +34,27 @@ require (
 	github.com/docker/docker v20.10.7+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
-	github.com/ecodia/golang-awaitility v0.0.0-20180710094957-fb55e59708c7
-	github.com/fatih/color v1.9.0 // indirect
-	github.com/fsnotify/fsnotify v1.4.9 // indirect
-	github.com/gogo/protobuf v1.3.2
-	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-faster/city v1.0.1 // indirect
+	github.com/go-faster/errors v0.6.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.6 // indirect
-	github.com/google/uuid v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
-	github.com/hashicorp/go-hclog v0.16.1
-	github.com/hashicorp/go-plugin v1.4.2 // indirect
+	github.com/hashicorp/go-plugin v1.4.5 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d // indirect
-	github.com/jaegertracing/jaeger v1.24.0
-	github.com/kr/pretty v0.2.1
-	github.com/magiconair/properties v1.8.5 // indirect
-	github.com/mattn/go-colorable v0.1.6 // indirect
-	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/klauspost/compress v1.15.10 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/magiconair/properties v1.8.6 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/mitchellh/go-testing-interface v1.0.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/moby/sys/mountinfo v0.4.1 // indirect
 	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
@@ -48,38 +63,40 @@ require (
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/opencontainers/runc v1.0.0-rc93 // indirect
-	github.com/opentracing/opentracing-go v1.2.0
-	github.com/pelletier/go-toml v1.9.3 // indirect
+	github.com/paulmach/orb v0.7.1 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+	github.com/pierrec/lz4/v4 v4.1.15 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.11.0
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.29.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
-	github.com/sirupsen/logrus v1.7.0 // indirect
-	github.com/spf13/afero v1.6.0 // indirect
-	github.com/spf13/cast v1.3.1 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.1 // indirect
+	github.com/segmentio/asm v1.2.0 // indirect
+	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/afero v1.8.2 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/cobra v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.8.1 // indirect
-	github.com/stretchr/testify v1.7.0
-	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/testcontainers/testcontainers-go v0.11.1
-	github.com/uber/jaeger-lib v2.4.1+incompatible
+	github.com/spf13/viper v1.13.0 // indirect
+	github.com/subosito/gotenv v1.4.1 // indirect
+	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
+	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.uber.org/atomic v1.8.0 // indirect
-	go.uber.org/multierr v1.6.0 // indirect
-	go.uber.org/zap v1.18.1
-	golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
-	golang.org/x/text v0.3.6 // indirect
-	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
-	google.golang.org/grpc v1.39.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
-	gopkg.in/ini.v1 v1.62.0 // indirect
+	go.opentelemetry.io/otel v1.10.0 // indirect
+	go.opentelemetry.io/otel/trace v1.10.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/net v0.0.0-20221002022538-bcab6841153b // indirect
+	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
+	golang.org/x/text v0.3.7 // indirect
+	google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc // indirect
+	google.golang.org/grpc v1.50.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/kr/text v0.2.0 // indirect
-	github.com/spf13/cobra v0.0.7 // indirect
 )
@@ -4,7 +4,7 @@ This is a guide to deploy Jaeger with Clickhouse storage on Kubernetes.
 
 ## Prerequisites
 
-1. Deploy [Jaeger operator](https://github.com/jaegertracing/jaeger-operator). Note that `gprc-plugin` storage type is supported since version 1.25.0.
+1. Deploy [Jaeger operator](https://github.com/jaegertracing/jaeger-operator). Note that `grpc-plugin` storage type is supported since version 1.25.0.
 2. Deploy [Clickhouse operator](https://github.com/Altinity/clickhouse-operator)
 3. Deploy [Zookeeper](https://github.com/Altinity/clickhouse-operator/blob/master/docs/replication_setup.md) (if replication is used)
 
@@ -41,7 +41,7 @@ metadata:
     jaeger-clickhouse: demo
 data:
   config.yaml: |
-    address: tcp://clickhouse-jaeger:9000
+    address: clickhouse-jaeger:9000
     username: clickhouse_operator
     password: clickhouse_operator_password
     spans_table:
@@ -1,16 +1,78 @@
 # Multi-tenant deployment
 
-Multi-tenant deployment requires using a database per tenant. Each tenant will talk
-to a separate Jaeger query and collector (or all-in-one).
+It may be desirable to share a common ClickHouse instance across multiple Jaeger instances.
+There are two ways of doing this, depending on whether spanning the tenants across separate databases is preferable.
 
-Create a database:
+## Shared database/tables
 
-```sql
-CREATE DATABASE tenant_1 ENGINE=Atomic;
-```
+If you wish to reuse the same ClickHouse database/tables across all tenants, you can specify a different `tenant: "<name>"` in each jaeger-clickhouse instance config.
 
-Then configure the plugin to use tenant's database:
+When a non-empty `tenant` is specified, all tables will be created with a `tenant` column, and all reads/writes for a given Jaeger instance will be applied against the configured tenant name for that instance.
 
-```yaml
-database: tenant_1
-```
+1. Create a shared database:
+   ```sql
+   CREATE DATABASE shared ENGINE=Atomic
+   ```
+2. Configure the per-tenant jaeger-clickhouse clients to specify tenant names:
+   ```yaml
+   database: shared
+   tenant: tenant_1
+   ```
+   ```yaml
+   database: shared
+   tenant: tenant_2
+   ```
+
+Multitenant mode must be enabled when the deployment is first created and cannot be toggled later, except perhaps by manually adding/removing the `tenant` column from all tables.
+Multitenant/singletenant instances must not be mixed within the same database - the two modes are mutually exclusive of each other.
+
+## Separate databases
+
+If you wish to keep instances fully separate, you can configure one ClickHouse database per tenant.
+This may be useful when different per-database configuration across tenants is desirable.
+
+1. Create a database for each tenant:
+   ```sql
+   CREATE DATABASE tenant_1 ENGINE=Atomic;
+   CREATE DATABASE tenant_2 ENGINE=Atomic;
+   ```
+2. Configure the per-tenant jaeger-clickhouse plugins matching databases:
+   ```yaml
+   database: tenant_1
+   ```
+   ```yaml
+   database: tenant_2
+   ```
+
+## Mixing methods in the same ClickHouse instance
+
+Each of the methods applies on a per-database basis. The methods require different schemas and must not be mixed in a single database, but it is possible to have different databases using different methods in the same ClickHouse instance.
+
+For example, there could be a `shared` database where multiple tenants are sharing the same tables:
+
+```sql
+CREATE DATABASE shared ENGINE=Atomic
+```
+```yaml
+database: shared
+tenant: tenant_1
+```
+```yaml
+database: shared
+tenant: tenant_2
+```
+
+Then there could be separate `isolated_x` databases for tenants that should be provided with their own dedicated tables, enabling e.g. better ACL isolation:
+
+```sql
+CREATE DATABASE isolated_1 ENGINE=Atomic
+CREATE DATABASE isolated_2 ENGINE=Atomic
+```
+```yaml
+database: isolated_1
+```
+```yaml
+database: isolated_2
+```
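The rewritten guide says that with a non-empty `tenant`, every read and write for an instance is scoped by a tenant column. As a rough illustration of what that scoping can look like on the query side, here is a hedged Go sketch; the `jaeger_index` table and `tenant` column names follow the guide, but the function itself is hypothetical and is not the plugin's actual query builder.

```go
package main

import "fmt"

// buildServicesQuery sketches tenant-aware reads: when a tenant is
// configured, every query gains a tenant predicate, so instances sharing
// the same tables only ever see their own rows. Illustrative only.
func buildServicesQuery(table, tenant string) (query string, args []interface{}) {
	query = fmt.Sprintf("SELECT DISTINCT service FROM %s", table)
	if tenant != "" {
		query += " WHERE tenant = ?"
		args = append(args, tenant)
	}
	return query, args
}

func main() {
	q, args := buildServicesQuery("jaeger_index", "tenant_1")
	fmt.Println(q, args) // tenant_1 only sees its own services

	q, args = buildServicesQuery("jaeger_index", "")
	fmt.Println(q, args) // single-tenant mode: no tenant predicate
}
```

This also explains why the guide forbids mixing multitenant and singletenant instances in one database: rows written without a tenant value would be invisible to tenant-scoped readers, and vice versa.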
@@ -15,7 +15,7 @@ Refer to the `config.yaml` how to setup replicated deployment.
 Sharding is a feature that allows splitting the data into multiple Clickhouse nodes to
 increase throughput and decrease latency.
 The sharding feature uses `Distributed` engine that is backed by local tables.
 The distributed engine is a "virtual" table that does not store any data. It is used as
 an interface to insert and query data.
 
 To setup sharding run the following statements on all nodes in the cluster.
@@ -30,7 +30,7 @@ CREATE TABLE IF NOT EXISTS jaeger_index AS jaeger_index_local ENGINE = Distribut
 CREATE TABLE IF NOT EXISTS jaeger_operations AS jaeger_operations_local ENGINE = Distributed('{cluster}', default, jaeger_operations_local, rand());
 ```
 
 * The `AS <table-name>` statement creates table with the same schema as the specified one.
 * The `Distributed` engine takes as parameters cluster , database, table name and sharding key.
 
 If the distributed table is not created on all Clickhouse nodes the Jaeger query fails to get the data from the storage.
@@ -56,7 +56,7 @@ EOF
 
 Use the following command to run `clickhouse-client` on Clickhouse nodes and create the distributed tables:
 ```bash
 kubectl exec -it statefulset.apps/chi-jaeger-cluster1-0-0 -- clickhouse-client
 ```
 
 ### Plugin configuration
 
@@ -64,7 +64,7 @@ kubectl exec -it statefulset.apps/chi-jaeger-cluster1-0-0 -- clickhouse-client
 The plugin has to be configured to write and read that from the global tables:
 
 ```yaml
-address: tcp://clickhouse-jaeger:9000
+address: clickhouse-jaeger:9000
 # database: jaeger
 spans_table: jaeger_spans
 spans_index_table: jaeger_index
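The guide's distributed-table DDL has to run on every node; besides `kubectl exec` with `clickhouse-client`, one way to script it is through the clickhouse-go v2 driver used elsewhere in this compare. A sketch under stated assumptions: the address is illustrative, the `jaeger_operations` statement is copied from the guide, and the `jaeger_index` statement is completed by analogy with the truncated hunk context above.

```go
package main

import (
	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	// Illustrative address; in the guide's Kubernetes setup you would run
	// clickhouse-client via kubectl exec on each node instead.
	db := clickhouse.OpenDB(&clickhouse.Options{
		Addr: []string{"clickhouse-jaeger:9000"},
	})
	defer db.Close()

	// Distributed tables backed by the local tables, sharded by rand().
	// The second statement is taken verbatim from the guide; the first is
	// inferred by analogy from the hunk's truncated context line.
	stmts := []string{
		"CREATE TABLE IF NOT EXISTS jaeger_index AS jaeger_index_local ENGINE = Distributed('{cluster}', default, jaeger_index_local, rand())",
		"CREATE TABLE IF NOT EXISTS jaeger_operations AS jaeger_operations_local ENGINE = Distributed('{cluster}', default, jaeger_operations_local, rand())",
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			panic(err)
		}
	}
}
```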
@@ -0,0 +1,3 @@
+address: localhost:9000
+init_sql_scripts_dir: init_sql_scripts
+init_tables: true
@@ -1,380 +0,0 @@
-[
-  {
-    "Caption": "Tags in one spot - Tags",
-    "Query": {
-      "ServiceName": "query01-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["span_tags_trace"]
-  },
-  {
-    "Caption": "Tags in one spot - Logs",
-    "Query": {
-      "ServiceName": "query02-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["log_tags_trace"]
-  },
-  {
-    "Caption": "Tags in one spot - Process",
-    "Query": {
-      "ServiceName": "query03-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["process_tags_trace"]
-  },
-  {
-    "Caption": "Tags in different spots",
-    "Query": {
-      "ServiceName": "query04-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multi_spot_tags_trace"]
-  },
-  {
-    "Caption": "Trace spans over multiple indices",
-    "Query": {
-      "ServiceName": "query05-service",
-      "OperationName": "",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T00:00:31.639875Z",
-      "StartTimeMax": "2017-01-26T00:07:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multi_index_trace"]
-  },
-  {
-    "Caption": "Operation name",
-    "Query": {
-      "ServiceName": "query06-service",
-      "OperationName": "query06-operation",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["opname_trace"]
-  },
-  {
-    "Caption": "Operation name + max Duration",
-    "Query": {
-      "ServiceName": "query07-service",
-      "OperationName": "query07-operation",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 2000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["opname_maxdur_trace"]
-  },
-  {
-    "Caption": "Operation name + Duration range",
-    "Query": {
-      "ServiceName": "query08-service",
-      "OperationName": "query08-operation",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["opname_dur_trace"]
-  },
-  {
-    "Caption": "Duration range",
-    "Query": {
-      "ServiceName": "query09-service",
-      "OperationName": "",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["dur_trace"]
-  },
-  {
-    "Caption": "max Duration",
-    "Query": {
-      "ServiceName": "query10-service",
-      "OperationName": "",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 1000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["max_dur_trace"]
-  },
-  {
-    "Caption": "default",
-    "Query": {
-      "ServiceName": "query11-service",
-      "OperationName": "",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["default"]
-  },
-  {
-    "Caption": "Tags + Operation name",
-    "Query": {
-      "ServiceName": "query12-service",
-      "OperationName": "query12-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_opname_trace"]
-  },
-  {
-    "Caption": "Tags + Operation name + max Duration",
-    "Query": {
-      "ServiceName": "query13-service",
-      "OperationName": "query13-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 2000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_opname_maxdur_trace"]
-  },
-  {
-    "Caption": "Tags + Operation name + Duration range",
-    "Query": {
-      "ServiceName": "query14-service",
-      "OperationName": "query14-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_opname_dur_trace"]
-  },
-  {
-    "Caption": "Tags + Duration range",
-    "Query": {
-      "ServiceName": "query15-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_dur_trace"]
-  },
-  {
-    "Caption": "Tags + max Duration",
-    "Query": {
-      "ServiceName": "query16-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 1000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_maxdur_trace"]
-  },
-  {
-    "Caption": "Multi-spot Tags + Operation name",
-    "Query": {
-      "ServiceName": "query17-service",
-      "OperationName": "query17-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multispottag_opname_trace"]
-  },
-  {
-    "Caption": "Multi-spot Tags + Operation name + max Duration",
-    "Query": {
-      "ServiceName": "query18-service",
-      "OperationName": "query18-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 2000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multispottag_opname_maxdur_trace"]
-  },
-  {
-    "Caption": "Multi-spot Tags + Operation name + Duration range",
-    "Query": {
-      "ServiceName": "query19-service",
-      "OperationName": "query19-operation",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multispottag_opname_dur_trace"]
-  },
-  {
-    "Caption": "Multi-spot Tags + Duration range",
-    "Query": {
-      "ServiceName": "query20-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 4500,
-      "DurationMax": 5500,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multispottag_dur_trace"]
-  },
-  {
-    "Caption": "Multi-spot Tags + max Duration",
-    "Query": {
-      "ServiceName": "query21-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"sameplacevalue",
-        "sameplacetag2":"123",
-        "sameplacetag3":"72.5",
-        "sameplacetag4":"true"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 1000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multispottag_maxdur_trace"]
-  },
-  {
-    "Caption": "Multiple Traces",
-    "Query": {
-      "ServiceName": "query22-service",
-      "OperationName": "",
-      "Tags": null,
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["multiple1_trace", "multiple2_trace", "multiple3_trace"]
-  }
-]
@@ -1,34 +0,0 @@
-[
-  {
-    "Caption": "Tag escaped operator + Operation name + max Duration",
-    "Query": {
-      "ServiceName": "query23-service",
-      "OperationName": "query23-operation",
-      "Tags": {
-        "sameplacetag1":"same\\*"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 1000,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_escaped_operator_trace_1"]
-  },
-  {
-    "Caption": "Tag wildcard regex",
-    "Query": {
-      "ServiceName": "query24-service",
-      "OperationName": "",
-      "Tags": {
-        "sameplacetag1":"same.*"
-      },
-      "StartTimeMin": "2017-01-26T15:46:31.639875Z",
-      "StartTimeMax": "2017-01-26T17:46:31.639875Z",
-      "DurationMin": 0,
-      "DurationMax": 0,
-      "NumTraces": 1000
-    },
-    "ExpectedFixtures": ["tags_wildcard_regex_1", "tags_wildcard_regex_2"]
-  }
-]
@@ -1,27 +0,0 @@
-{
-  "spans": [
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAM=",
-      "operationName": "",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [],
-      "process": {
-        "serviceName": "query11-service",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    }
-  ]
-}
@@ -1,27 +0,0 @@
-{
-  "spans": [
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAACQ==",
-      "spanId": "AAAAAAAAAAM=",
-      "operationName": "placeholder",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "5000ns",
-      "tags": [],
-      "process": {
-        "serviceName": "query09-service",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    }
-  ]
-}
@@ -1,127 +0,0 @@
-{
-  "spans": [
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAM=",
-      "operationName": "example-operation-1",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [],
-      "process": {
-        "serviceName": "example-service-1",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    },
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAQ=",
-      "operationName": "example-operation-2",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [],
-      "process": {
-        "serviceName": "example-service-2",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    },
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAU=",
-      "operationName": "example-operation-1",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [],
-      "process": {
-        "serviceName": "example-service-3",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    },
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAY=",
-      "operationName": "example-operation-3",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [{
-        "key": "span.kind",
-        "vType": "STRING",
-        "vStr": "server"
-      }],
-      "process": {
-        "serviceName": "example-service-1",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    },
-    {
-      "traceId": "AAAAAAAAAAAAAAAAAAAAEQ==",
-      "spanId": "AAAAAAAAAAc=",
-      "operationName": "example-operation-4",
-      "references": [],
-      "startTime": "2017-01-26T16:46:31.639875Z",
-      "duration": "100000ns",
-      "tags": [{
-        "key": "span.kind",
-        "vType": "STRING",
-        "vStr": "client"
-      }],
-      "process": {
-        "serviceName": "example-service-1",
-        "tags": []
-      },
-      "logs": [
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        },
-        {
-          "timestamp": "2017-01-26T16:46:31.639875Z",
-          "fields": []
-        }
-      ]
-    }
-  ]
-}
@ -1,53 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAAg==",
|
||||
"spanId": "AAAAAAAAAAE=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query02-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAEA==",
|
||||
"spanId": "AAAAAAAAAAI=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query10-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABQ==",
|
||||
"spanId": "AAAAAAAAAAE=",
|
||||
"operationName": "operation-list-test2",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T00:03:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query05-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABQ==",
|
||||
"spanId": "AAAAAAAAAAI=",
|
||||
"operationName": "operation-list-test3",
|
||||
"references": [],
|
||||
"startTime": "2017-01-25T23:56:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query05-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABA==",
|
||||
"spanId": "AAAAAAAAAAE=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query04-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAACIQ==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "100000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query22-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAACIg==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "100000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query22-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAACIw==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "100000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query22-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAIA==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query20-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAIQ==",
|
||||
"spanId": "AAAAAAAAAAU=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query21-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAGQ==",
|
||||
"spanId": "AAAAAAAAAAU=",
|
||||
"operationName": "query19-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query19-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAGA==",
|
||||
"spanId": "AAAAAAAAAAQ=",
|
||||
"operationName": "query18-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query18-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAFw==",
|
||||
"spanId": "AAAAAAAAAAQ=",
|
||||
"operationName": "query17-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query17-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAACA==",
|
||||
"spanId": "AAAAAAAAAAI=",
|
||||
"operationName": "query08-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query08-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABw==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "query07-operation",
|
||||
"tags": [],
|
||||
"references": [
|
||||
{
|
||||
"refType": "CHILD_OF",
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABw==",
|
||||
"spanId": "AAAAAAAAAAI="
|
||||
}
|
||||
],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"process": {
|
||||
"serviceName": "query07-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": []
|
||||
},
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABw==",
|
||||
"spanId": "AAAAAAAAAAI=",
|
||||
"operationName": "query07-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "2000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query07-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAABg==",
|
||||
"spanId": "AAAAAAAAAAE=",
|
||||
"operationName": "query06-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query06-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAAw==",
|
||||
"spanId": "AAAAAAAAAAE=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query03-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAAQ==",
|
||||
"spanId": "AAAAAAAAAAI=",
|
||||
"operationName": "some-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "7000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query01-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": []
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAFQ==",
|
||||
"spanId": "AAAAAAAAAAQ=",
|
||||
"operationName": "placeholder",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query15-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAFEh==",
|
||||
"spanId": "AAAAAAAAAAU=",
|
||||
"operationName": "query23-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "same*"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query23-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAABZEh==",
|
||||
"spanId": "AAAAAAAAAAU=",
|
||||
"operationName": "query23-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacedifferentvalue"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query23-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAFg==",
|
||||
"spanId": "AAAAAAAAAAU=",
|
||||
"operationName": "",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query16-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAFA==",
|
||||
"spanId": "AAAAAAAAAAM=",
|
||||
"operationName": "query14-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "5000ns",
|
||||
"tags": [],
|
||||
"process": {
|
||||
"serviceName": "query14-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,71 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAEw==",
|
||||
"spanId": "AAAAAAAAAAc=",
|
||||
"operationName": "query13-operation",
|
||||
"references": [],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "1000ns",
|
||||
"tags": [
|
||||
{
|
||||
"key": "tag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "value1"
|
||||
}
|
||||
],
|
||||
"process": {
|
||||
"serviceName": "query13-service",
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
]
|
||||
},
|
||||
"logs": [
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "tag3",
|
||||
"vType": "STRING",
|
||||
"vStr": "value3"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"timestamp": "2017-01-26T16:46:31.639875Z",
|
||||
"fields": [
|
||||
{
|
||||
"key": "something",
|
||||
"vType": "STRING",
|
||||
"vStr": "blah"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
{
|
||||
"spans": [
|
||||
{
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAEg==",
|
||||
"spanId": "AAAAAAAAAAQ=",
|
||||
"operationName": "query12-operation",
|
||||
"references": [
|
||||
{
|
||||
"refType": "CHILD_OF",
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAA/w==",
|
||||
"spanId": "AAAAAAAAAP8="
|
||||
},
|
||||
{
|
||||
"refType": "CHILD_OF",
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAAQ==",
|
||||
"spanId": "AAAAAAAAAAI="
|
||||
},
|
||||
{
|
||||
"refType": "FOLLOWS_FROM",
|
||||
"traceId": "AAAAAAAAAAAAAAAAAAAAAQ==",
|
||||
"spanId": "AAAAAAAAAAI="
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "sameplacetag1",
|
||||
"vType": "STRING",
|
||||
"vStr": "sameplacevalue"
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag2",
|
||||
"vType": "INT64",
|
||||
"vInt64": 123
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag4",
|
||||
"vType": "BOOL",
|
||||
"vBool": true
|
||||
},
|
||||
{
|
||||
"key": "sameplacetag3",
|
||||
"vType": "FLOAT64",
|
||||
"vFloat64": 72.5
|
||||
},
|
||||
{
|
||||
"key": "blob",
|
||||
"vType": "BINARY",
|
||||
"vBinary": "AAAwOQ=="
|
||||
}
|
||||
],
|
||||
"startTime": "2017-01-26T16:46:31.639875Z",
|
||||
"duration": "2000ns",
|
||||
"process": {
|
||||
"serviceName": "query12-service",
|
||||
"tags": []
|
||||
},
|
||||
"logs": []
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,25 +0,0 @@
{
  "spans": [
    {
      "traceId": "AAAAAAAAAAAAAAAAAAAKEg==",
      "spanId": "AAAAAAAAAAQ=",
      "operationName": "",
      "references": [
      ],
      "tags": [
        {
          "key": "sameplacetag1",
          "vType": "STRING",
          "vStr": "sameplacevalue1"
        }
      ],
      "startTime": "2017-01-26T16:46:31.639875Z",
      "duration": "2000ns",
      "process": {
        "serviceName": "query24-service",
        "tags": []
      },
      "logs": []
    }
  ]
}
@ -1,25 +0,0 @@
{
  "spans": [
    {
      "traceId": "AAAAAAAAAAAAAAAAAAASEg==",
      "spanId": "AAAAAAAAAAQ=",
      "operationName": "",
      "references": [
      ],
      "tags": [
        {
          "key": "sameplacetag1",
          "vType": "STRING",
          "vStr": "sameplacevalue2"
        }
      ],
      "startTime": "2017-01-26T16:46:31.639875Z",
      "duration": "2000ns",
      "process": {
        "serviceName": "query24-service",
        "tags": []
      },
      "logs": []
    }
  ]
}
@ -19,19 +19,19 @@ import (
    "os"
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/uber/jaeger-lib/metrics"
    "go.uber.org/zap"

    "github.com/jaegertracing/jaeger/pkg/config"
    "github.com/jaegertracing/jaeger/pkg/metrics"
    "github.com/jaegertracing/jaeger/pkg/testutils"
    "github.com/jaegertracing/jaeger/plugin/storage/grpc"
    "github.com/jaegertracing/jaeger/plugin/storage/integration"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap"
)

const defaultPluginBinaryPath = "../../../examples/memstore-plugin/memstore-plugin"

type GRPCStorageIntegrationTestSuite struct {
    StorageIntegration
    integration.StorageIntegration
    logger *zap.Logger
    pluginBinaryPath string
    pluginConfigPath string
@ -1 +1 @@
DROP DATABASE IF EXISTS default;
DROP DATABASE IF EXISTS default;

@ -1 +1 @@
CREATE DATABASE IF NOT EXISTS default;
CREATE DATABASE IF NOT EXISTS default;
@ -1,17 +0,0 @@
CREATE TABLE IF NOT EXISTS jaeger_index_local (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    service LowCardinality(String) CODEC(ZSTD(1)),
    operation LowCardinality(String) CODEC(ZSTD(1)),
    durationUs UInt64 CODEC(ZSTD(1)),
    tags Nested
    (
        key LowCardinality(String),
        value String
    ) CODEC(ZSTD(1)),
    INDEX idx_tag_keys tags.key TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (service, -toUnixTimestamp(timestamp))
SETTINGS index_granularity=1024
@ -1,8 +0,0 @@
CREATE TABLE IF NOT EXISTS jaeger_spans_local (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    model String CODEC(ZSTD(3))
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY traceID
SETTINGS index_granularity=1024
@ -1,13 +0,0 @@
CREATE MATERIALIZED VIEW IF NOT EXISTS jaeger_operations_local
ENGINE SummingMergeTree
PARTITION BY toYYYYMM(date) ORDER BY (date, service, operation)
SETTINGS index_granularity=32
POPULATE
AS SELECT
    toDate(timestamp) AS date,
    service,
    operation,
    count() as count,
    if(has(tags.key, 'span.kind'), tags.value[indexOf(tags.key, 'span.kind')], '') as spankind
FROM jaeger_index_local
GROUP BY date, service, operation, tags.key, tags.value
@ -1,8 +0,0 @@
CREATE TABLE IF NOT EXISTS jaeger_spans_archive_local (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    model String CODEC(ZSTD(3))
) ENGINE MergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY traceID
SETTINGS index_granularity=1024
@ -1,390 +0,0 @@
|
|||
// Copyright (c) 2019 The Jaeger Authors.
|
||||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
"github.com/jaegertracing/jaeger/storage/dependencystore"
|
||||
"github.com/jaegertracing/jaeger/storage/spanstore"
|
||||
)
|
||||
|
||||
const (
|
||||
iterations = 100
|
||||
)
|
||||
|
||||
// StorageIntegration holds components for storage integration test
|
||||
type StorageIntegration struct {
|
||||
SpanWriter spanstore.Writer
|
||||
SpanReader spanstore.Reader
|
||||
DependencyWriter dependencystore.Writer
|
||||
DependencyReader dependencystore.Reader
|
||||
Fixtures []*QueryFixtures
|
||||
// TODO: remove this flag after all storage plugins return spanKind with operationNames
|
||||
NotSupportSpanKindWithOperation bool
|
||||
FixturesPath string
|
||||
|
||||
// CleanUp() should ensure that the storage backend is clean before another test.
|
||||
// called either before or after each test, and should be idempotent
|
||||
CleanUp func() error
|
||||
|
||||
// Refresh() should ensure that the storage backend is up to date before being queried.
|
||||
// called between set-up and queries in each test
|
||||
Refresh func() error
|
||||
}
|
||||
|
||||
// === SpanStore Integration Tests ===
|
||||
|
||||
// QueryFixtures and TraceFixtures are under ./fixtures/queries.json and ./fixtures/traces/*.json respectively.
|
||||
// Each query fixture includes:
|
||||
// Caption: describes the query we are testing
|
||||
// Query: the query we are testing
|
||||
// ExpectedFixture: the trace fixture that we want back from these queries.
|
||||
// Queries are not necessarily numbered, but since each query requires a service name,
|
||||
// the service name is formatted "query##-service".
|
||||
type QueryFixtures struct {
|
||||
Caption string
|
||||
Query *spanstore.TraceQueryParameters
|
||||
ExpectedFixtures []string
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) cleanUp(t *testing.T) {
|
||||
require.NotNil(t, s.CleanUp, "CleanUp function must be provided")
|
||||
require.NoError(t, s.CleanUp())
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) refresh(t *testing.T) {
|
||||
require.NotNil(t, s.Refresh, "Refresh function must be provided")
|
||||
require.NoError(t, s.Refresh())
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) waitForCondition(t *testing.T, predicate func(t *testing.T) bool) bool {
|
||||
for i := 0; i < iterations; i++ {
|
||||
t.Logf("Waiting for storage backend to update documents, iteration %d out of %d", i+1, iterations)
|
||||
if predicate(t) {
|
||||
return true
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond) // Will wait up to 10 seconds at worst.
|
||||
}
|
||||
return predicate(t)
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) testGetServices(t *testing.T) {
|
||||
defer s.cleanUp(t)
|
||||
|
||||
expected := []string{"example-service-1", "example-service-2", "example-service-3"}
|
||||
s.loadParseAndWriteExampleTrace(t)
|
||||
s.refresh(t)
|
||||
|
||||
var actual []string
|
||||
var err error
|
||||
found := s.waitForCondition(t, func(t *testing.T) bool {
|
||||
actual, err = s.SpanReader.GetServices(context.Background())
|
||||
require.NoError(t, err)
|
||||
return assert.ObjectsAreEqualValues(expected, actual)
|
||||
})
|
||||
|
||||
if !assert.True(t, found) {
|
||||
t.Log("\t Expected:", expected)
|
||||
t.Log("\t Actual :", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) testGetLargeSpan(t *testing.T) {
|
||||
defer s.cleanUp(t)
|
||||
|
||||
t.Log("Testing Large Trace over 10K ...")
|
||||
expected := s.loadParseAndWriteLargeTrace(t)
|
||||
expectedTraceID := expected.Spans[0].TraceID
|
||||
s.refresh(t)
|
||||
|
||||
var actual *model.Trace
|
||||
found := s.waitForCondition(t, func(t *testing.T) bool {
|
||||
var err error
|
||||
actual, err = s.SpanReader.GetTrace(context.Background(), expectedTraceID)
|
||||
return err == nil && len(actual.Spans) == len(expected.Spans)
|
||||
})
|
||||
if !assert.True(t, found) {
|
||||
CompareTraces(t, expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) testGetOperations(t *testing.T) {
|
||||
defer s.cleanUp(t)
|
||||
|
||||
var expected []spanstore.Operation
|
||||
if s.NotSupportSpanKindWithOperation {
|
||||
expected = []spanstore.Operation{
|
||||
{Name: "example-operation-1"},
|
||||
{Name: "example-operation-3"},
|
||||
{Name: "example-operation-4"},
|
||||
}
|
||||
} else {
|
||||
expected = []spanstore.Operation{
|
||||
{Name: "example-operation-1"},
|
||||
{Name: "example-operation-3", SpanKind: "server"},
|
||||
{Name: "example-operation-4", SpanKind: "client"},
|
||||
}
|
||||
}
|
||||
s.loadParseAndWriteExampleTrace(t)
|
||||
s.refresh(t)
|
||||
|
||||
var actual []spanstore.Operation
|
||||
found := s.waitForCondition(t, func(t *testing.T) bool {
|
||||
var err error
|
||||
actual, err = s.SpanReader.GetOperations(context.Background(),
|
||||
spanstore.OperationQueryParameters{ServiceName: "example-service-1"})
|
||||
require.NoError(t, err)
|
||||
return assert.ObjectsAreEqualValues(expected, actual)
|
||||
})
|
||||
|
||||
if !assert.True(t, found) {
|
||||
t.Log("\t Expected:", expected)
|
||||
t.Log("\t Actual :", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) testGetTrace(t *testing.T) {
|
||||
defer s.cleanUp(t)
|
||||
|
||||
expected := s.loadParseAndWriteExampleTrace(t)
|
||||
expectedTraceID := expected.Spans[0].TraceID
|
||||
s.refresh(t)
|
||||
|
||||
var actual *model.Trace
|
||||
found := s.waitForCondition(t, func(t *testing.T) bool {
|
||||
var err error
|
||||
actual, err = s.SpanReader.GetTrace(context.Background(), expectedTraceID)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
return err == nil && len(actual.Spans) == len(expected.Spans)
|
||||
})
|
||||
if !assert.True(t, found) {
|
||||
CompareTraces(t, expected, actual)
|
||||
}
|
||||
|
||||
t.Run("NotFound error", func(t *testing.T) {
|
||||
fakeTraceID := model.TraceID{High: 0, Low: 0}
|
||||
trace, err := s.SpanReader.GetTrace(context.Background(), fakeTraceID)
|
||||
assert.Equal(t, spanstore.ErrTraceNotFound, err)
|
||||
assert.Nil(t, trace)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) testFindTraces(t *testing.T) {
|
||||
// TODO: Fix this
|
||||
t.Skip("Not suitable for ClickHouse")
|
||||
defer s.cleanUp(t)
|
||||
|
||||
fixturesPath := s.FixturesPath
|
||||
if s.FixturesPath == "" {
|
||||
fixturesPath = "."
|
||||
}
|
||||
// Note: all cases include ServiceName + StartTime range
|
||||
s.Fixtures = append(s.Fixtures, LoadAndParseQueryTestCases(t, fmt.Sprintf("%s/fixtures/queries.json", fixturesPath))...)
|
||||
|
||||
// Each query test case only specifies matching traces, but does not provide counterexamples.
|
||||
// To improve coverage we get all possible traces and store all of them before running queries.
|
||||
allTraceFixtures := make(map[string]*model.Trace)
|
||||
expectedTracesPerTestCase := make([][]*model.Trace, 0, len(s.Fixtures))
|
||||
for _, queryTestCase := range s.Fixtures {
|
||||
var expected []*model.Trace
|
||||
for _, traceFixture := range queryTestCase.ExpectedFixtures {
|
||||
trace, ok := allTraceFixtures[traceFixture]
|
||||
if !ok {
|
||||
trace = s.getTraceFixture(t, traceFixture)
|
||||
err := s.writeTrace(t, trace)
|
||||
require.NoError(t, err, "Unexpected error when writing trace %s to storage", traceFixture)
|
||||
allTraceFixtures[traceFixture] = trace
|
||||
}
|
||||
expected = append(expected, trace)
|
||||
}
|
||||
expectedTracesPerTestCase = append(expectedTracesPerTestCase, expected)
|
||||
}
|
||||
s.refresh(t)
|
||||
for i, queryTestCase := range s.Fixtures {
|
||||
t.Run(queryTestCase.Caption, func(t *testing.T) {
|
||||
expected := expectedTracesPerTestCase[i]
|
||||
actual := s.findTracesByQuery(t, queryTestCase.Query, expected)
|
||||
CompareSliceOfTraces(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) findTracesByQuery(t *testing.T, query *spanstore.TraceQueryParameters, expected []*model.Trace) []*model.Trace {
|
||||
var traces []*model.Trace
|
||||
found := s.waitForCondition(t, func(t *testing.T) bool {
|
||||
var err error
|
||||
traces, err = s.SpanReader.FindTraces(context.Background(), query)
|
||||
if err == nil && tracesMatch(t, traces, expected) {
|
||||
return true
|
||||
}
|
||||
t.Logf("FindTraces: expected: %d, actual: %d, match: false", len(expected), len(traces))
|
||||
return false
|
||||
})
|
||||
require.True(t, found)
|
||||
return traces
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) writeTrace(_ *testing.T, trace *model.Trace) error {
|
||||
for _, span := range trace.Spans {
|
||||
if err := s.SpanWriter.WriteSpan(context.Background(), span); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) loadParseAndWriteExampleTrace(t *testing.T) *model.Trace {
|
||||
trace := s.getTraceFixture(t, "example_trace")
|
||||
err := s.writeTrace(t, trace)
|
||||
require.NoError(t, err, "Not expecting error when writing example_trace to storage")
|
||||
return trace
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) loadParseAndWriteLargeTrace(t *testing.T) *model.Trace {
|
||||
trace := s.getTraceFixture(t, "example_trace")
|
||||
span := trace.Spans[0]
|
||||
spns := make([]*model.Span, 1, 10008)
|
||||
trace.Spans = spns
|
||||
trace.Spans[0] = span
|
||||
for i := 1; i < 10008; i++ {
|
||||
s := new(model.Span)
|
||||
*s = *span
|
||||
s.StartTime = s.StartTime.Add(time.Second * time.Duration(i+1))
|
||||
trace.Spans = append(trace.Spans, s)
|
||||
}
|
||||
err := s.writeTrace(t, trace)
|
||||
require.NoError(t, err, "Not expecting error when writing example_trace to storage")
|
||||
return trace
|
||||
}
|
||||
|
||||
func (s *StorageIntegration) getTraceFixture(t *testing.T, fixture string) *model.Trace {
|
||||
fixturesPath := s.FixturesPath
|
||||
if s.FixturesPath == "" {
|
||||
fixturesPath = "."
|
||||
}
|
||||
fileName := fmt.Sprintf("%s/fixtures/traces/%s.json", fixturesPath, fixture)
|
||||
return getTraceFixtureExact(t, fileName)
|
||||
}
|
||||
|
||||
func getTraceFixtureExact(t *testing.T, fileName string) *model.Trace {
|
||||
var trace model.Trace
|
||||
loadAndParseJSONPB(t, fileName, &trace)
|
||||
return &trace
|
||||
}
|
||||
|
||||
func loadAndParseJSONPB(t *testing.T, path string, object proto.Message) {
|
||||
// #nosec
|
||||
inStr, err := ioutil.ReadFile(path)
|
||||
require.NoError(t, err, "Not expecting error when loading fixture %s", path)
|
||||
err = jsonpb.Unmarshal(bytes.NewReader(correctTime(inStr)), object)
|
||||
require.NoError(t, err, "Not expecting error when unmarshaling fixture %s", path)
|
||||
}
|
||||
|
||||
// LoadAndParseQueryTestCases loads and parses query test cases
|
||||
func LoadAndParseQueryTestCases(t *testing.T, queriesFile string) []*QueryFixtures {
|
||||
var queries []*QueryFixtures
|
||||
loadAndParseJSON(t, queriesFile, &queries)
|
||||
return queries
|
||||
}
|
||||
|
||||
func loadAndParseJSON(t *testing.T, path string, object interface{}) {
|
||||
// #nosec
|
||||
inStr, err := ioutil.ReadFile(path)
|
||||
require.NoError(t, err, "Not expecting error when loading fixture %s", path)
|
||||
err = json.Unmarshal(correctTime(inStr), object)
|
||||
require.NoError(t, err, "Not expecting error when unmarshaling fixture %s", path)
|
||||
}
|
||||
|
||||
// Required because we only want to query recent traces, so we replace all the fixture dates with recent dates.
|
||||
func correctTime(json []byte) []byte {
|
||||
jsonString := string(json)
|
||||
now := time.Now().UTC()
|
||||
today := now.Format("2006-01-02")
|
||||
yesterday := now.AddDate(0, 0, -1).Format("2006-01-02")
|
||||
retString := strings.ReplaceAll(jsonString, "2017-01-26", today)
|
||||
retString = strings.ReplaceAll(retString, "2017-01-25", yesterday)
|
||||
return []byte(retString)
|
||||
}
|
||||
|
||||
func tracesMatch(t *testing.T, actual []*model.Trace, expected []*model.Trace) bool {
|
||||
if !assert.Equal(t, len(expected), len(actual), "Expecting certain number of traces") {
|
||||
return false
|
||||
}
|
||||
return assert.Equal(t, spanCount(expected), spanCount(actual), "Expecting certain number of spans")
|
||||
}
|
||||
|
||||
func spanCount(traces []*model.Trace) int {
|
||||
var count int
|
||||
for _, trace := range traces {
|
||||
count += len(trace.Spans)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// === DependencyStore Integration Tests ===
|
||||
|
||||
func (s *StorageIntegration) testGetDependencies(t *testing.T) {
|
||||
if s.DependencyReader == nil || s.DependencyWriter == nil {
|
||||
t.Skipf("Skipping GetDependencies test because dependency reader or writer is nil")
|
||||
return
|
||||
}
|
||||
|
||||
defer s.cleanUp(t)
|
||||
|
||||
expected := []model.DependencyLink{
|
||||
{
|
||||
Parent: "hello",
|
||||
Child: "world",
|
||||
CallCount: uint64(1),
|
||||
},
|
||||
{
|
||||
Parent: "world",
|
||||
Child: "hello",
|
||||
CallCount: uint64(3),
|
||||
},
|
||||
}
|
||||
require.NoError(t, s.DependencyWriter.WriteDependencies(time.Now(), expected))
|
||||
s.refresh(t)
|
||||
actual, err := s.DependencyReader.GetDependencies(context.Background(), time.Now(), 5*time.Minute)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, expected, actual)
|
||||
}
|
||||
|
||||
// IntegrationTestAll runs all integration tests
|
||||
func (s *StorageIntegration) IntegrationTestAll(t *testing.T) {
|
||||
t.Run("GetServices", s.testGetServices)
|
||||
t.Run("GetOperations", s.testGetOperations)
|
||||
t.Run("GetTrace", s.testGetTrace)
|
||||
t.Run("GetLargeSpans", s.testGetLargeSpan)
|
||||
t.Run("FindTraces", s.testFindTraces)
|
||||
t.Run("GetDependencies", s.testGetDependencies)
|
||||
}
|
|
@ -1,2 +0,0 @@
address: tcp://localhost:9000
init_sql_scripts_dir: init_sql_scripts
@ -1,85 +0,0 @@
|
|||
// Copyright (c) 2019 The Jaeger Authors.
|
||||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/kr/pretty"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
)
|
||||
|
||||
// CompareSliceOfTraces compares two trace slices
|
||||
func CompareSliceOfTraces(t *testing.T, expected []*model.Trace, actual []*model.Trace) {
|
||||
require.Equal(t, len(expected), len(actual), "Unequal number of expected vs. actual traces")
|
||||
model.SortTraces(expected)
|
||||
model.SortTraces(actual)
|
||||
for i := range expected {
|
||||
checkSize(t, expected[i], actual[i])
|
||||
}
|
||||
if diff := pretty.Diff(expected, actual); len(diff) > 0 {
|
||||
for _, d := range diff {
|
||||
t.Logf("Expected and actual differ: %s\n", d)
|
||||
}
|
||||
out, err := json.Marshal(actual)
|
||||
out2, err2 := json.Marshal(expected)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err2)
|
||||
t.Logf("Actual traces: %s", string(out))
|
||||
t.Logf("Expected traces: %s", string(out2))
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
// CompareTraces compares two traces
|
||||
func CompareTraces(t *testing.T, expected *model.Trace, actual *model.Trace) {
|
||||
if expected.Spans == nil {
|
||||
require.Nil(t, actual.Spans)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, actual)
|
||||
require.NotNil(t, actual.Spans)
|
||||
model.SortTrace(expected)
|
||||
model.SortTrace(actual)
|
||||
checkSize(t, expected, actual)
|
||||
|
||||
if diff := pretty.Diff(expected, actual); len(diff) > 0 {
|
||||
for _, d := range diff {
|
||||
t.Logf("Expected and actual differ: %v\n", d)
|
||||
}
|
||||
out, err := json.Marshal(actual)
|
||||
assert.NoError(t, err)
|
||||
t.Logf("Actual trace: %s", string(out))
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func checkSize(t *testing.T, expected *model.Trace, actual *model.Trace) {
|
||||
require.Equal(t, len(expected.Spans), len(actual.Spans))
|
||||
for i := range expected.Spans {
|
||||
expectedSpan := expected.Spans[i]
|
||||
actualSpan := actual.Spans[i]
|
||||
require.True(t, len(expectedSpan.Tags) == len(actualSpan.Tags))
|
||||
require.True(t, len(expectedSpan.Logs) == len(actualSpan.Logs))
|
||||
if expectedSpan.Process != nil && actualSpan.Process != nil {
|
||||
require.True(t, len(expectedSpan.Process.Tags) == len(actualSpan.Process.Tags))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,6 +1,11 @@
module github.com/jaegertracing/jaeger-clickhouse/internal/tools

go 1.17
go 1.19

require (
    github.com/golangci/golangci-lint v1.41.1
    golang.org/x/tools v0.1.5
)

require (
    4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a // indirect

@ -42,7 +47,6 @@ require (
    github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
    github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect
    github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
    github.com/golangci/golangci-lint v1.41.1
    github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
    github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
    github.com/golangci/misspell v0.3.5 // indirect

@ -128,7 +132,6 @@ require (
    golang.org/x/mod v0.4.2 // indirect
    golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
    golang.org/x/text v0.3.5 // indirect
    golang.org/x/tools v0.1.5
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/protobuf v1.25.0 // indirect
    gopkg.in/ini.v1 v1.51.0 // indirect
@ -82,7 +82,6 @@ github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbz
github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=
github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -0,0 +1,3 @@
CREATE TABLE IF NOT EXISTS {{.Table}}
ON CLUSTER '{cluster}' AS {{.Database}}.{{.Table}}_local
ENGINE = Distributed('{cluster}', {{.Database}}, {{.Table}}_local, {{.Hash}})
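A minimal sketch of how this template could be rendered with Go's text/template; the struct and sample values below are illustrative, not the plugin's actual types (the cityHash64(traceID) sharding key is borrowed from the hard-coded distributed-table DDL removed further down in this diff):

```go
package main

import (
	"os"
	"text/template"
)

// distributedDDL mirrors the placeholders in the template above.
// Illustrative only; the plugin's real type may differ.
type distributedDDL struct {
	Table    string
	Database string
	Hash     string
}

const ddl = `CREATE TABLE IF NOT EXISTS {{.Table}}
ON CLUSTER '{cluster}' AS {{.Database}}.{{.Table}}_local
ENGINE = Distributed('{cluster}', {{.Database}}, {{.Table}}_local, {{.Hash}})`

func main() {
	t := template.Must(template.New("ddl").Parse(ddl))
	// Sample values are assumptions for demonstration.
	_ = t.Execute(os.Stdout, distributedDDL{
		Table:    "jaeger_spans",
		Database: "default",
		Hash:     "cityHash64(traceID)",
	})
}
```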
@ -0,0 +1,28 @@
CREATE TABLE IF NOT EXISTS {{.SpansIndexTable}}
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
(
    {{if .Multitenant -}}
    tenant LowCardinality(String) CODEC (ZSTD(1)),
    {{- end -}}
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    service LowCardinality(String) CODEC (ZSTD(1)),
    operation LowCardinality(String) CODEC (ZSTD(1)),
    durationUs UInt64 CODEC (ZSTD(1)),
    tags Nested
    (
        key LowCardinality(String),
        value String
    ) CODEC (ZSTD(1)),
    INDEX idx_tag_keys tags.key TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
{{.TTLTimestamp}}
PARTITION BY (
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    toDate(timestamp)
)
ORDER BY (service, -toUnixTimestamp(timestamp))
SETTINGS index_granularity = 1024
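For context, the idx_tag_keys bloom-filter index above exists to speed up tag lookups against the Nested tags columns. A hedged Go sketch of such a lookup (the driver choice, DSN, and concrete table name are assumptions; the DSN mirrors the deleted test config seen earlier in this diff):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go" // driver choice is an assumption
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://localhost:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Tag predicates of this shape are what the bloom filter on
	// tags.key accelerates; "jaeger_index_local" is an assumed name.
	rows, err := db.Query(
		`SELECT DISTINCT traceID
		 FROM jaeger_index_local
		 WHERE service = ?
		   AND has(tags.key, 'span.kind')
		   AND tags.value[indexOf(tags.key, 'span.kind')] = ?
		 LIMIT 20`,
		"example-service-1", "server",
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id)
	}
}
```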
@ -0,0 +1,43 @@
CREATE MATERIALIZED VIEW IF NOT EXISTS {{.OperationsTable}}
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
ENGINE {{if .Replication}}ReplicatedSummingMergeTree{{else}}SummingMergeTree{{end}}
{{.TTLDate}}
PARTITION BY (
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    toYYYYMM(date)
)
ORDER BY (
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    date,
    service,
    operation
)
SETTINGS index_granularity = 32
POPULATE
AS SELECT
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    toDate(timestamp) AS date,
    service,
    operation,
    count() AS count,
    if(
        has(tags.key, 'span.kind'),
        tags.value[indexOf(tags.key, 'span.kind')],
        ''
    ) AS spankind
FROM {{.Database}}.{{.SpansIndexTable}}
GROUP BY
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    date,
    service,
    operation,
    tags.key,
    tags.value
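The view above pre-aggregates operation names (with span kind) per day, so reading them back is a cheap scan. A hedged Go sketch of such a read (the driver, DSN, and the jaeger_operations table name are assumptions; the template only names it {{.OperationsTable}}):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go" // driver choice is an assumption
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://localhost:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Operations (and their span kind) for one service over the last
	// week, summing the per-day counts produced by the view.
	rows, err := db.Query(
		`SELECT operation, spankind, sum(count)
		 FROM jaeger_operations
		 WHERE service = ? AND date >= today() - 7
		 GROUP BY operation, spankind`,
		"example-service-1",
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var op, kind string
		var n uint64
		if err := rows.Scan(&op, &kind, &n); err != nil {
			log.Fatal(err)
		}
		fmt.Println(op, kind, n)
	}
}
```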
@ -0,0 +1,19 @@
CREATE TABLE IF NOT EXISTS {{.SpansArchiveTable}}
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
(
    {{if .Multitenant -}}
    tenant LowCardinality(String) CODEC (ZSTD(1)),
    {{- end -}}
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    model String CODEC (ZSTD(3))
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
{{.TTLTimestamp}}
PARTITION BY (
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    toYYYYMM(timestamp)
)
ORDER BY traceID
SETTINGS index_granularity = 1024
@ -0,0 +1,19 @@
CREATE TABLE IF NOT EXISTS {{.SpansTable}}
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
(
    {{if .Multitenant -}}
    tenant LowCardinality(String) CODEC (ZSTD(1)),
    {{- end -}}
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    model String CODEC (ZSTD(3))
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
{{.TTLTimestamp}}
PARTITION BY (
    {{if .Multitenant -}}
    tenant,
    {{- end -}}
    toDate(timestamp)
)
ORDER BY traceID
SETTINGS index_granularity = 1024
@ -1,18 +0,0 @@
CREATE TABLE IF NOT EXISTS %s (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    service LowCardinality(String) CODEC(ZSTD(1)),
    operation LowCardinality(String) CODEC(ZSTD(1)),
    durationUs UInt64 CODEC(ZSTD(1)),
    tags Nested
    (
        key LowCardinality(String),
        value String
    ) CODEC(ZSTD(1)),
    INDEX idx_tag_keys tags.key TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
%s
PARTITION BY toDate(timestamp)
ORDER BY (service, -toUnixTimestamp(timestamp))
SETTINGS index_granularity=1024
@ -1,9 +0,0 @@
CREATE TABLE IF NOT EXISTS %s (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    model String CODEC(ZSTD(3))
) ENGINE MergeTree()
%s
PARTITION BY toDate(timestamp)
ORDER BY traceID
SETTINGS index_granularity=1024
@ -1,14 +0,0 @@
CREATE MATERIALIZED VIEW IF NOT EXISTS %s
ENGINE SummingMergeTree
%s
PARTITION BY toYYYYMM(date) ORDER BY (date, service, operation)
SETTINGS index_granularity=32
POPULATE
AS SELECT
    toDate(timestamp) AS date,
    service,
    operation,
    count() as count,
    if(has(tags.key, 'span.kind'), tags.value[indexOf(tags.key, 'span.kind')], '') as spankind
FROM %s -- Here goes local jaeger index table's name
GROUP BY date, service, operation, tags.key, tags.value
@ -1,9 +0,0 @@
CREATE TABLE IF NOT EXISTS %s (
    timestamp DateTime CODEC(Delta, ZSTD(1)),
    traceID String CODEC(ZSTD(1)),
    model String CODEC(ZSTD(3))
) ENGINE MergeTree()
%s
PARTITION BY toYYYYMM(timestamp)
ORDER BY traceID
SETTINGS index_granularity=1024
@ -1,19 +0,0 @@
CREATE TABLE IF NOT EXISTS %s ON CLUSTER '{cluster}'
(
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    service LowCardinality(String) CODEC (ZSTD(1)),
    operation LowCardinality(String) CODEC (ZSTD(1)),
    durationUs UInt64 CODEC (ZSTD(1)),
    tags Nested
    (
        key LowCardinality(String),
        value String
    ) CODEC(ZSTD(1)),
    INDEX idx_tag_keys tags.key TYPE bloom_filter(0.01) GRANULARITY 64,
    INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
) ENGINE ReplicatedMergeTree
%s
PARTITION BY toDate(timestamp)
ORDER BY (service, -toUnixTimestamp(timestamp))
SETTINGS index_granularity = 1024;
@ -1,10 +0,0 @@
CREATE TABLE IF NOT EXISTS %s ON CLUSTER '{cluster}'
(
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    model String CODEC (ZSTD(3))
) ENGINE ReplicatedMergeTree
%s
PARTITION BY toDate(timestamp)
ORDER BY traceID
SETTINGS index_granularity = 1024;
@ -1,13 +0,0 @@
CREATE MATERIALIZED VIEW IF NOT EXISTS %s ON CLUSTER '{cluster}'
ENGINE ReplicatedMergeTree
%s
PARTITION BY toYYYYMM(date) ORDER BY (date, service, operation)
SETTINGS index_granularity=32
POPULATE
AS SELECT toDate(timestamp) AS date,
    service,
    operation,
    count() as count,
    if(has(tags.key, 'span.kind'), tags.value[indexOf(tags.key, 'span.kind')], '') as spankind
FROM %s -- here goes local index table
GROUP BY date, service, operation, tags.key, tags.value;
@ -1,10 +0,0 @@
CREATE TABLE IF NOT EXISTS %s ON CLUSTER '{cluster}'
(
    timestamp DateTime CODEC (Delta, ZSTD(1)),
    traceID String CODEC (ZSTD(1)),
    model String CODEC (ZSTD(3))
) ENGINE ReplicatedMergeTree
%s
PARTITION BY toYYYYMM(timestamp)
ORDER BY traceID
SETTINGS index_granularity = 1024
@ -1,3 +0,0 @@
CREATE TABLE IF NOT EXISTS %s -- global table name
ON CLUSTER '{cluster}' AS %s -- local table name
ENGINE = Distributed('{cluster}', %s, %s, cityHash64(traceID)); -- local table name
@ -1,3 +0,0 @@
CREATE TABLE IF NOT EXISTS %s -- operations table
ON CLUSTER '{cluster}' AS %s -- local operations table
ENGINE = Distributed('{cluster}', %s, %s, rand()); -- local operations table
@ -46,9 +46,9 @@ func (workerHeap *workerHeap) RemoveWorker(worker *WriteWorker) error {
    return nil
}

func (workerHeap *workerHeap) CLoseWorkers() {
func (workerHeap *workerHeap) CloseWorkers() {
    for _, item := range *workerHeap.elems {
        item.worker.CLose()
        item.worker.Close()
    }
}
@ -28,6 +28,8 @@ func (conv ConverterMock) ConvertValue(v interface{}) (driver.Value, error) {
        return driver.Value(t), nil
    case int64:
        return driver.Value(t), nil
    case uint64:
        return driver.Value(t), nil
    case int:
        return driver.Value(t), nil
    case []string:
@ -3,7 +3,7 @@ package mocks

import (
    "database/sql"

    "github.com/DATA-DOG/go-sqlmock"
    sqlmock "github.com/DATA-DOG/go-sqlmock"
)

func GetDbMock() (*sql.DB, sqlmock.Sqlmock, error) {
@ -5,9 +5,8 @@ import (
    "log"
    "testing"

    hclog "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/assert"

    "github.com/hashicorp/go-hclog"
)

const levelCount = 5
@ -5,9 +5,8 @@ import (
    "strconv"
    "testing"

    hclog "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/assert"

    "github.com/hashicorp/go-hclog"
)

const (
@ -4,15 +4,16 @@ import (
    "database/sql"
    "time"

    "github.com/hashicorp/go-hclog"
    hclog "github.com/hashicorp/go-hclog"
)

// WriteParams contains parameters that are shared between WriteWorker`s
type WriteParams struct {
// WorkerParams contains parameters that are shared between WriteWorkers
type WorkerParams struct {
    logger     hclog.Logger
    db         *sql.DB
    indexTable TableName
    spansTable TableName
    tenant     string
    encoding   Encoding
    delay      time.Duration
}
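The rename from WriteParams to WorkerParams rides along with the new tenant field: an empty tenant leaves the generated SQL unchanged, while a non-empty one adds a tenant column to every INSERT (see the writer hunks further down). A sketch of how the struct might be assembled inside this package — the literal values are placeholders, not defaults from the source:

// In package clickhousespanstore; db and logger come from the caller,
// and every value below is illustrative only.
func newExampleParams(db *sql.DB, logger hclog.Logger) *WorkerParams {
    return &WorkerParams{
        logger:     logger,
        db:         db,
        indexTable: TableName("jaeger_index"),
        spansTable: TableName("jaeger_spans"),
        tenant:     "acme", // "" disables the tenant column entirely
        encoding:   EncodingJSON,
        delay:      5 * time.Second,
    }
}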
@ -4,9 +4,8 @@ import (
    "math"
    "sync"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/jaegertracing/jaeger/model"
    "github.com/prometheus/client_golang/prometheus"
)

var (

@ -24,7 +23,7 @@ var (
// Given a new batch, WriteWorkerPool creates a new WriteWorker.
// If the number of currently processed spans is more than maxSpanCount, then the oldest worker is removed.
type WriteWorkerPool struct {
    params *WriteParams
    params *WorkerParams

    finish chan bool
    done   sync.WaitGroup

@ -38,7 +37,7 @@ type WriteWorkerPool struct {

var registerPoolMetrics sync.Once

func NewWorkerPool(params *WriteParams, maxSpanCount int) WriteWorkerPool {
func NewWorkerPool(params *WorkerParams, maxSpanCount int) WriteWorkerPool {
    registerPoolMetrics.Do(func() {
        prometheus.MustRegister(numDiscardedSpans, numPendingSpans)
    })
@ -101,7 +100,7 @@ func (pool *WriteWorkerPool) Work() {
            pool.params.logger.Error("could not remove worker", "worker", worker, "error", err)
        }
    case <-pool.finish:
        pool.workers.CLoseWorkers()
        pool.workers.CloseWorkers()
        finish = true
    }
    pool.done.Done()

@ -116,7 +115,7 @@ func (pool *WriteWorkerPool) WriteBatch(batch []*model.Span) {
    pool.batches <- batch
}

func (pool *WriteWorkerPool) CLose() {
func (pool *WriteWorkerPool) Close() {
    pool.finish <- true
    pool.done.Wait()
}
@ -10,10 +10,9 @@ import (
    "time"

    "github.com/gogo/protobuf/proto"
    "github.com/opentracing/opentracing-go"

    "github.com/jaegertracing/jaeger/model"
    "github.com/jaegertracing/jaeger/storage/spanstore"
    opentracing "github.com/opentracing/opentracing-go"
)

const (

@ -34,18 +33,20 @@ type TraceReader struct {
    operationsTable TableName
    indexTable      TableName
    spansTable      TableName
    tenant          string
    maxNumSpans     uint
}

var _ spanstore.Reader = (*TraceReader)(nil)

// NewTraceReader returns a TraceReader for the database
func NewTraceReader(db *sql.DB, operationsTable, indexTable, spansTable TableName, maxNumSpans uint) *TraceReader {
func NewTraceReader(db *sql.DB, operationsTable, indexTable, spansTable TableName, tenant string, maxNumSpans uint) *TraceReader {
    return &TraceReader{
        db:              db,
        operationsTable: operationsTable,
        indexTable:      indexTable,
        spansTable:      spansTable,
        tenant:          tenant,
        maxNumSpans:     maxNumSpans,
    }
}
@ -60,24 +61,29 @@ func (r *TraceReader) getTraces(ctx context.Context, traceIDs []model.TraceID) (
    span, _ := opentracing.StartSpanFromContext(ctx, "getTraces")
    defer span.Finish()

    values := make([]interface{}, len(traceIDs))
    args := make([]interface{}, len(traceIDs))
    for i, traceID := range traceIDs {
        values[i] = traceID.String()
        args[i] = traceID.String()
    }

    // It's more efficient to do PREWHERE on traceID to read only the needed models:
    // * https://clickhouse.tech/docs/en/sql-reference/statements/select/prewhere/
    //nolint:gosec , G201: SQL string formatting
    query := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (%s)", r.spansTable, "?"+strings.Repeat(",?", len(values)-1))
    query := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (%s)", r.spansTable, "?"+strings.Repeat(",?", len(traceIDs)-1))

    if r.tenant != "" {
        query += " AND tenant = ?"
        args = append(args, r.tenant)
    }

    if r.maxNumSpans > 0 {
        query += fmt.Sprintf(" ORDER BY timestamp LIMIT %d BY traceID", r.maxNumSpans)
    }

    span.SetTag("db.statement", query)
    span.SetTag("db.args", values)
    span.SetTag("db.args", args)

    rows, err := r.db.QueryContext(ctx, query, values...)
    rows, err := r.db.QueryContext(ctx, query, args...)
    if err != nil {
        return nil, err
    }
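Note how the placeholder list is built: one "?" for the first trace ID plus a ",?" repetition for each remaining one, with the tenant filter appended after the IN list so the bound-argument order still matches. A standalone sketch of the same pattern — the table and tenant names are illustrative, not taken from the source:

package main

import (
    "fmt"
    "strings"
)

func main() {
    traceIDs := []string{"0000000000000001", "0000000000000002", "0000000000000003"}
    tenant := "acme" // an empty string would skip the tenant clause

    args := make([]interface{}, len(traceIDs))
    for i, id := range traceIDs {
        args[i] = id
    }

    // "?" for the first ID, ",?" for each of the rest.
    query := fmt.Sprintf("SELECT model FROM jaeger_spans PREWHERE traceID IN (%s)",
        "?"+strings.Repeat(",?", len(traceIDs)-1))
    if tenant != "" {
        query += " AND tenant = ?"
        args = append(args, tenant)
    }

    fmt.Println(query)                   // ... IN (?,?,?) AND tenant = ?
    fmt.Println(len(args), "bound args") // 4 bound args
}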
@ -177,11 +183,19 @@ func (r *TraceReader) GetServices(ctx context.Context) ([]string, error) {
        return nil, errNoOperationsTable
    }

    query := fmt.Sprintf("SELECT service FROM %s GROUP BY service", r.operationsTable)
    query := fmt.Sprintf("SELECT service FROM %s", r.operationsTable)
    args := make([]interface{}, 0)

    if r.tenant != "" {
        query += " WHERE tenant = ?"
        args = append(args, r.tenant)
    }

    query += " GROUP BY service"
    span.SetTag("db.statement", query)
    span.SetTag("db.args", args)

    return r.getStrings(ctx, query)
    return r.getStrings(ctx, query, args...)
}

// GetOperations fetches operations in the service, and an empty slice if the service does not exist
@ -197,8 +211,16 @@ func (r *TraceReader) GetOperations(
    }

    //nolint:gosec , G201: SQL string formatting
    query := fmt.Sprintf("SELECT operation, spankind FROM %s WHERE service = ? GROUP BY operation, spankind ORDER BY operation", r.operationsTable)
    args := []interface{}{params.ServiceName}
    query := fmt.Sprintf("SELECT operation, spankind FROM %s WHERE", r.operationsTable)
    args := make([]interface{}, 0)

    if r.tenant != "" {
        query += " tenant = ? AND"
        args = append(args, r.tenant)
    }

    query += " service = ? GROUP BY operation, spankind ORDER BY operation"
    args = append(args, params.ServiceName)

    span.SetTag("db.statement", query)
    span.SetTag("db.args", args)
@ -325,16 +347,18 @@ func (r *TraceReader) findTraceIDsInRange(ctx context.Context, params *spanstore
    query := fmt.Sprintf("SELECT DISTINCT traceID FROM %s WHERE service = ?", r.indexTable)
    args := []interface{}{params.ServiceName}

    if r.tenant != "" {
        query += " AND tenant = ?"
        args = append(args, r.tenant)
    }

    if params.OperationName != "" {
        query += " AND operation = ?"
        args = append(args, params.OperationName)
    }

    query += " AND timestamp >= ?"
    args = append(args, start)

    query += " AND timestamp <= ?"
    args = append(args, end)
    query += " AND timestamp >= ? AND timestamp <= ?"
    args = append(args, start, end)

    if params.DurationMin != 0 {
        query += " AND durationUs >= ?"
@ -10,14 +10,14 @@ import (
    "testing"
    "time"

    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"

    "github.com/DATA-DOG/go-sqlmock"
    sqlmock "github.com/DATA-DOG/go-sqlmock"
    "github.com/gogo/protobuf/proto"
    "github.com/jaegertracing/jaeger/model"
    "github.com/jaegertracing/jaeger/storage/spanstore"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"
)

const (
@ -30,82 +30,99 @@ const (
var testStartTime = time.Date(2010, 3, 15, 7, 40, 0, 0, time.UTC)

func TestTraceReader_FindTraceIDs(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    service := "service"
    start := testStartTime
    end := start.Add(24 * time.Hour)
    fullDuration := end.Sub(start)
    duration := fullDuration
    for i := 0; i < maxProgressiveSteps; i++ {
        duration /= 2
    }
    params := spanstore.TraceQueryParameters{
        ServiceName:  service,
        NumTraces:    testNumTraces,
        StartTimeMin: start,
        StartTimeMax: end,

    tests := map[string]struct {
        queryTemplate string
        firstArgs     []driver.Value
        tenant        string
    }{
        "default": {
            queryTemplate: "SELECT DISTINCT traceID FROM %s WHERE service = ? AND timestamp >= ? AND timestamp <= ?%s ORDER BY service, timestamp DESC LIMIT ?",
            firstArgs:     []driver.Value{service},
        },
        "tenant": {
            queryTemplate: "SELECT DISTINCT traceID FROM %s WHERE service = ? AND tenant = ? AND timestamp >= ? AND timestamp <= ?%s ORDER BY service, timestamp DESC LIMIT ?",
            firstArgs:     []driver.Value{service, testTenant},
            tenant:        testTenant,
        },
    }

    expectedTraceIDs := make([]model.TraceID, testNumTraces)
    traceIDValues := make([]driver.Value, testNumTraces)
    for i := range expectedTraceIDs {
        traceID := model.TraceID{Low: uint64(i)}
        expectedTraceIDs[i] = traceID
        traceIDValues[i] = traceID.String()
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            db, mock, err := mocks.GetDbMock()
            require.NoError(t, err, "an error was not expected when opening a stub database connection")
            defer db.Close()

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            start := testStartTime
            end := start.Add(24 * time.Hour)
            fullDuration := end.Sub(start)
            duration := fullDuration
            for i := 0; i < maxProgressiveSteps; i++ {
                duration /= 2
            }
            params := spanstore.TraceQueryParameters{
                ServiceName:  service,
                NumTraces:    testNumTraces,
                StartTimeMin: start,
                StartTimeMax: end,
            }

            expectedTraceIDs := make([]model.TraceID, testNumTraces)
            traceIDValues := make([]driver.Value, testNumTraces)
            for i := range expectedTraceIDs {
                traceID := model.TraceID{Low: uint64(i)}
                expectedTraceIDs[i] = traceID
                traceIDValues[i] = traceID.String()
            }

            found := traceIDValues[:0]
            endArg := end
            for i := 0; i < maxProgressiveSteps; i++ {
                if i == maxProgressiveSteps-1 {
                    duration = fullDuration
                }

                startArg := endArg.Add(-duration)
                if startArg.Before(start) {
                    startArg = start
                }

                // Select how many spans query will return
                index := int(math.Min(float64(i*2+1), testNumTraces))
                if i == maxProgressiveSteps-1 {
                    index = testNumTraces
                }
                args := test.firstArgs
                args = append(args, startArg)
                args = append(args, endArg)
                args = append(args, found...)
                args = append(args, testNumTraces-len(found))
                mock.
                    ExpectQuery(fmt.Sprintf(
                        test.queryTemplate,
                        testIndexTable,
                        func() string {
                            if len(found) == 0 {
                                return ""
                            }
                            return " AND traceID NOT IN (?" + strings.Repeat(",?", len(found)-1) + ")"
                        }(),
                    )).
                    WithArgs(args...).
                    WillReturnRows(getRows(traceIDValues[len(found):index]))
                endArg = startArg
                duration *= 2
                found = traceIDValues[:index]
            }

            traceIDs, err := traceReader.FindTraceIDs(context.Background(), &params)
            require.NoError(t, err)
            assert.Equal(t, expectedTraceIDs, traceIDs)
            assert.NoError(t, mock.ExpectationsWereMet())
        })
    }

    found := traceIDValues[:0]
    endArg := end
    for i := 0; i < maxProgressiveSteps; i++ {
        if i == maxProgressiveSteps-1 {
            duration = fullDuration
        }

        startArg := endArg.Add(-duration)
        if startArg.Before(start) {
            startArg = start
        }

        // Select how many spans query will return
        index := int(math.Min(float64(i*2+1), testNumTraces))
        if i == maxProgressiveSteps-1 {
            index = testNumTraces
        }
        args := append(
            append(
                []driver.Value{
                    service,
                    startArg,
                    endArg,
                },
                found...),
            testNumTraces-len(found))
        mock.
            ExpectQuery(fmt.Sprintf(
                "SELECT DISTINCT traceID FROM %s WHERE service = ? AND timestamp >= ? AND timestamp <= ?%s ORDER BY service, timestamp DESC LIMIT ?",
                testIndexTable,
                func() string {
                    if len(found) == 0 {
                        return ""
                    }
                    return " AND traceID NOT IN (?" + strings.Repeat(",?", len(found)-1) + ")"
                }(),
            )).
            WithArgs(args...).
            WillReturnRows(getRows(traceIDValues[len(found):index]))
        endArg = startArg
        duration *= 2
        found = traceIDValues[:index]
    }

    traceIDs, err := traceReader.FindTraceIDs(context.Background(), &params)
    require.NoError(t, err)
    assert.Equal(t, expectedTraceIDs, traceIDs)
    assert.NoError(t, mock.ExpectationsWereMet())
}

func TestTraceReader_FindTraceIDsShortDurationAfterReduction(t *testing.T) {
|
|||
require.NoError(t, err, "an error was not expected when opening a stub database connection")
|
||||
defer db.Close()
|
||||
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
|
||||
service := "service"
|
||||
start := testStartTime
|
||||
end := start.Add(8 * time.Hour)
|
||||
|
@ -196,7 +213,7 @@ func TestTraceReader_FindTraceIDsEarlyExit(t *testing.T) {
|
|||
require.NoError(t, err, "an error was not expected when opening a stub database connection")
|
||||
defer db.Close()
|
||||
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
|
||||
service := "service"
|
||||
start := testStartTime
|
||||
end := start.Add(24 * time.Hour)
|
||||
|
@ -249,7 +266,7 @@ func TestTraceReader_FindTraceIDsShortRange(t *testing.T) {
|
|||
require.NoError(t, err, "an error was not expected when opening a stub database connection")
|
||||
defer db.Close()
|
||||
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
|
||||
service := "service"
|
||||
start := testStartTime
|
||||
end := start.Add(time.Hour)
|
||||
|
@ -292,7 +309,7 @@ func TestTraceReader_FindTraceIDsQueryError(t *testing.T) {
|
|||
require.NoError(t, err, "an error was not expected when opening a stub database connection")
|
||||
defer db.Close()
|
||||
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
|
||||
service := "service"
|
||||
start := testStartTime
|
||||
end := start.Add(24 * time.Hour)
|
||||
|
@ -331,7 +348,7 @@ func TestTraceReader_FindTraceIDsZeroStartTime(t *testing.T) {
|
|||
require.NoError(t, err, "an error was not expected when opening a stub database connection")
|
||||
defer db.Close()
|
||||
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
|
||||
traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
|
||||
service := "service"
|
||||
start := time.Time{}
|
||||
end := testStartTime
|
||||
|
@ -349,26 +366,44 @@ func TestTraceReader_FindTraceIDsZeroStartTime(t *testing.T) {
}

func TestTraceReader_GetServices(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    expectedServices := []string{"GET /first", "POST /second", "PUT /third"}
    expectedServiceValues := make([]driver.Value, len(expectedServices))
    for i := range expectedServices {
        expectedServiceValues[i] = expectedServices[i]
    tests := map[string]struct {
        query  string
        args   []driver.Value
        tenant string
    }{
        "default": {
            query: fmt.Sprintf("SELECT service FROM %s GROUP BY service", testOperationsTable),
            args:  []driver.Value{},
        },
        "tenant": {
            query:  fmt.Sprintf("SELECT service FROM %s WHERE tenant = ? GROUP BY service", testOperationsTable),
            args:   []driver.Value{testTenant},
            tenant: testTenant,
        },
    }
    queryResult := getRows(expectedServiceValues)

    mock.
        ExpectQuery(fmt.Sprintf("SELECT service FROM %s GROUP BY service", testOperationsTable)).
        WillReturnRows(queryResult)
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            db, mock, err := mocks.GetDbMock()
            require.NoError(t, err, "an error was not expected when opening a stub database connection")
            defer db.Close()

            services, err := traceReader.GetServices(context.Background())
            require.NoError(t, err)
            assert.Equal(t, expectedServices, services)
            assert.NoError(t, mock.ExpectationsWereMet())
            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            expectedServices := []string{"GET /first", "POST /second", "PUT /third"}
            expectedServiceValues := make([]driver.Value, len(expectedServices))
            for i := range expectedServices {
                expectedServiceValues[i] = expectedServices[i]
            }
            queryResult := getRows(expectedServiceValues)

            mock.ExpectQuery(test.query).WithArgs(test.args...).WillReturnRows(queryResult)

            services, err := traceReader.GetServices(context.Background())
            require.NoError(t, err)
            assert.Equal(t, expectedServices, services)
            assert.NoError(t, mock.ExpectationsWereMet())
        })
    }
}

func TestTraceReader_GetServicesQueryError(t *testing.T) {
@ -376,7 +411,7 @@ func TestTraceReader_GetServicesQueryError(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)

    mock.
        ExpectQuery(fmt.Sprintf("SELECT service FROM %s GROUP BY service", testOperationsTable)).

@ -392,7 +427,7 @@ func TestTraceReader_GetServicesNoTable(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, "", testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, "", testIndexTable, testSpansTable, "", testMaxNumSpans)

    services, err := traceReader.GetServices(context.Background())
    require.ErrorIs(t, err, errNoOperationsTable)

@ -404,14 +439,27 @@ func TestTraceReader_GetOperations(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    service := "test service"
    params := spanstore.OperationQueryParameters{ServiceName: service}
    tests := map[string]struct {
        tenant   string
        query    string
        args     []driver.Value
        rows     *sqlmock.Rows
        expected []spanstore.Operation
    }{
        "default": {
            query: fmt.Sprintf("SELECT operation, spankind FROM %s WHERE service = ? GROUP BY operation, spankind ORDER BY operation", testOperationsTable),
            args:  []driver.Value{service},
            rows: sqlmock.NewRows([]string{"operation", "spankind"}).
                AddRow("operation_1", "client").
                AddRow("operation_2", ""),
            expected: []spanstore.Operation{{Name: "operation_1", SpanKind: "client"}, {Name: "operation_2"}},
        },
        "tenant": {
            tenant: testTenant,
            query:  fmt.Sprintf("SELECT operation, spankind FROM %s WHERE tenant = ? AND service = ? GROUP BY operation, spankind ORDER BY operation", testOperationsTable),
            args:   []driver.Value{testTenant, service},
            rows: sqlmock.NewRows([]string{"operation", "spankind"}).
                AddRow("operation_1", "client").
                AddRow("operation_2", ""),
@ -421,10 +469,11 @@ func TestTraceReader_GetOperations(t *testing.T) {
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            mock.
                ExpectQuery(fmt.Sprintf("SELECT operation, spankind FROM %s WHERE service = ? GROUP BY operation, spankind ORDER BY operation", testOperationsTable)).
                WithArgs(service).
                ExpectQuery(test.query).
                WithArgs(test.args...).
                WillReturnRows(test.rows)

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            operations, err := traceReader.GetOperations(context.Background(), params)
            require.NoError(t, err)
            assert.Equal(t, test.expected, operations)

@ -438,7 +487,7 @@ func TestTraceReader_GetOperationsQueryError(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    service := "test service"
    params := spanstore.OperationQueryParameters{ServiceName: service}
    mock.

@ -457,7 +506,7 @@ func TestTraceReader_GetOperationsNoTable(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, "", testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, "", testIndexTable, testSpansTable, "", testMaxNumSpans)
    service := "test service"
    params := spanstore.OperationQueryParameters{ServiceName: service}
    operations, err := traceReader.GetOperations(context.Background(), params)

@ -470,7 +519,6 @@ func TestTraceReader_GetTrace(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceID := model.TraceID{High: 0, Low: 1}
    spanRefs := generateRandomSpans(testSpansInTrace)
    trace := model.Trace{}
@ -484,6 +532,7 @@ func TestTraceReader_GetTrace(t *testing.T) {
    }

    tests := map[string]struct {
        tenant        string
        queryResult   *sqlmock.Rows
        expectedTrace *model.Trace
        expectedError error

@ -493,11 +542,23 @@ func TestTraceReader_GetTrace(t *testing.T) {
            expectedTrace: &trace,
            expectedError: nil,
        },
        "json tenant": {
            tenant:        testTenant,
            queryResult:   getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return json.Marshal(span) }),
            expectedTrace: &trace,
            expectedError: nil,
        },
        "protobuf": {
            queryResult:   getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTrace: &trace,
            expectedError: nil,
        },
        "protobuf tenant": {
            tenant:        testTenant,
            queryResult:   getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTrace: &trace,
            expectedError: nil,
        },
        "trace not found": {
            queryResult:   sqlmock.NewRows([]string{"model"}),
            expectedTrace: nil,

@ -512,13 +573,23 @@ func TestTraceReader_GetTrace(t *testing.T) {

    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            mock.
                ExpectQuery(
                    fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?)", testSpansTable),
                ).
                WithArgs(traceID).
                WillReturnRows(test.queryResult)
            if test.tenant == "" {
                mock.
                    ExpectQuery(
                        fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?)", testSpansTable),
                    ).
                    WithArgs(traceID).
                    WillReturnRows(test.queryResult)
            } else {
                mock.
                    ExpectQuery(
                        fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?) AND tenant = ?", testSpansTable),
                    ).
                    WithArgs(traceID, test.tenant).
                    WillReturnRows(test.queryResult)
            }

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            trace, err := traceReader.GetTrace(context.Background(), traceID)
            require.ErrorIs(t, err, test.expectedError)
            if trace != nil {
@ -538,7 +609,6 @@ func TestSpanWriter_getTraces(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceIDs := []model.TraceID{
        {High: 0, Low: 1},
        {High: 2, Low: 2},

@ -557,23 +627,65 @@ func TestSpanWriter_getTraces(t *testing.T) {
        traceIDStrings[i] = traceID.String()
    }

    defaultQuery := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?)", testSpansTable)
    tenantQuery := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?) AND tenant = ?", testSpansTable)

    tests := map[string]struct {
        tenant         string
        query          string
        args           []driver.Value
        queryResult    *sqlmock.Rows
        expectedTraces []*model.Trace
    }{
        "JSON encoded traces one span per trace": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getEncodedSpans(spans[:len(traceIDs)], func(span *model.Span) ([]byte, error) { return json.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans[:len(traceIDs)]),
        },
        "tenant JSON encoded traces one span per trace": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getEncodedSpans(spans[:len(traceIDs)], func(span *model.Span) ([]byte, error) { return json.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans[:len(traceIDs)]),
        },
        "Protobuf encoded traces one span per trace": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getEncodedSpans(spans[:len(traceIDs)], func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans[:len(traceIDs)]),
        },
        "tenant Protobuf encoded traces one span per trace": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getEncodedSpans(spans[:len(traceIDs)], func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans[:len(traceIDs)]),
        },
        "JSON encoded traces many spans per trace": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return json.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans),
        },
        "tenant JSON encoded traces many spans per trace": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return json.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans),
        },
        "Protobuf encoded traces many spans per trace": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans),
        },
        "tenant Protobuf encoded traces many spans per trace": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getEncodedSpans(spans, func(span *model.Span) ([]byte, error) { return proto.Marshal(span) }),
            expectedTraces: getTracesFromSpans(spans),
        },
@ -582,12 +694,11 @@ func TestSpanWriter_getTraces(t *testing.T) {
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            mock.
                ExpectQuery(
                    fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?)", testSpansTable),
                ).
                WithArgs(traceIDStrings...).
                ExpectQuery(test.query).
                WithArgs(test.args...).
                WillReturnRows(test.queryResult)

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            traces, err := traceReader.getTraces(context.Background(), traceIDs)
            require.NoError(t, err)
            model.SortTraces(traces)

@ -602,7 +713,6 @@ func TestSpanWriter_getTracesIncorrectData(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceIDs := []model.TraceID{
        {High: 0, Low: 1},
        {High: 2, Low: 2},

@ -621,17 +731,43 @@ func TestSpanWriter_getTracesIncorrectData(t *testing.T) {
        traceIDStrings[i] = traceID.String()
    }

    defaultQuery := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?)", testSpansTable)
    tenantQuery := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?) AND tenant = ?", testSpansTable)

    tests := map[string]struct {
        tenant         string
        query          string
        args           []driver.Value
        queryResult    *sqlmock.Rows
        expectedResult []*model.Trace
        expectedError  error
    }{
        "JSON encoding incorrect data": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getRows([]driver.Value{[]byte{'{', 'n', 'o', 't', '_', 'a', '_', 'k', 'e', 'y', '}'}}),
            expectedResult: []*model.Trace(nil),
            expectedError:  fmt.Errorf("invalid character 'n' looking for beginning of object key string"),
        },
        "tenant JSON encoding incorrect data": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getRows([]driver.Value{[]byte{'{', 'n', 'o', 't', '_', 'a', '_', 'k', 'e', 'y', '}'}}),
            expectedResult: []*model.Trace(nil),
            expectedError:  fmt.Errorf("invalid character 'n' looking for beginning of object key string"),
        },
        "Protobuf encoding incorrect data": {
            query:          defaultQuery,
            args:           traceIDStrings,
            queryResult:    getRows([]driver.Value{[]byte{'i', 'n', 'c', 'o', 'r', 'r', 'e', 'c', 't'}}),
            expectedResult: []*model.Trace{},
            expectedError:  nil,
        },
        "tenant Protobuf encoding incorrect data": {
            tenant:         testTenant,
            query:          tenantQuery,
            args:           append(traceIDStrings, testTenant),
            queryResult:    getRows([]driver.Value{[]byte{'i', 'n', 'c', 'o', 'r', 'r', 'e', 'c', 't'}}),
            expectedResult: []*model.Trace{},
            expectedError:  nil,

@ -641,12 +777,11 @@ func TestSpanWriter_getTracesIncorrectData(t *testing.T) {
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            mock.
                ExpectQuery(
                    fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (?,?,?,?)", testSpansTable),
                ).
                WithArgs(traceIDStrings...).
                ExpectQuery(test.query).
                WithArgs(test.args...).
                WillReturnRows(test.queryResult)

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            traces, err := traceReader.getTraces(context.Background(), traceIDs)
            if test.expectedError == nil {
                assert.NoError(t, err)

@ -664,7 +799,7 @@ func TestSpanWriter_getTracesQueryError(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    traceIDs := []model.TraceID{
        {High: 0, Low: 1},
        {High: 2, Low: 2},
@ -695,7 +830,7 @@ func TestSpanWriter_getTracesRowsScanError(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    traceIDs := []model.TraceID{
        {High: 0, Low: 1},
        {High: 2, Low: 2},

@ -727,7 +862,7 @@ func TestSpanWriter_getTraceNoTraceIDs(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    traceIDs := make([]model.TraceID, 0)

    traces, err := traceReader.getTraces(context.Background(), traceIDs)

@ -777,7 +912,6 @@ func TestSpanWriter_findTraceIDsInRange(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    service := "test_service"
    operation := "test_operation"
    start := time.Unix(0, 0)

@ -812,6 +946,7 @@ func TestSpanWriter_findTraceIDsInRange(t *testing.T) {
    tests := map[string]struct {
        queryParams   spanstore.TraceQueryParameters
        skip          []model.TraceID
        tenant        string
        expectedQuery string
        expectedArgs  []driver.Value
    }{

@ -829,6 +964,22 @@ func TestSpanWriter_findTraceIDsInRange(t *testing.T) {
            testNumTraces,
        },
    },
    "tenant": {
        queryParams: spanstore.TraceQueryParameters{ServiceName: service, NumTraces: testNumTraces},
        skip:        make([]model.TraceID, 0),
        tenant:      testTenant,
        expectedQuery: fmt.Sprintf(
            "SELECT DISTINCT traceID FROM %s WHERE service = ? AND tenant = ? AND timestamp >= ? AND timestamp <= ? ORDER BY service, timestamp DESC LIMIT ?",
            testIndexTable,
        ),
        expectedArgs: []driver.Value{
            service,
            testTenant,
            start,
            end,
            testNumTraces,
        },
    },
    "maxDuration": {
        queryParams: spanstore.TraceQueryParameters{ServiceName: service, NumTraces: testNumTraces, DurationMax: maxDuration},
        skip:        make([]model.TraceID, 0),

@ -922,6 +1073,7 @@ func TestSpanWriter_findTraceIDsInRange(t *testing.T) {
                WithArgs(test.expectedArgs...).
                WillReturnRows(queryResult)

            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)
            res, err := traceReader.findTraceIDsInRange(
                context.Background(),
                &test.queryParams,
@ -940,7 +1092,7 @@ func TestSpanReader_findTraceIDsInRangeNoIndexTable(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, "", testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, "", testSpansTable, "", testMaxNumSpans)
    res, err := traceReader.findTraceIDsInRange(
        context.Background(),
        nil,

@ -957,7 +1109,7 @@ func TestSpanReader_findTraceIDsInRangeEndBeforeStart(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    res, err := traceReader.findTraceIDsInRange(
        context.Background(),
        nil,

@ -974,7 +1126,7 @@ func TestSpanReader_findTraceIDsInRangeQueryError(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)
    service := "test_service"
    start := time.Unix(0, 0)
    end := time.Now()

@ -1008,42 +1160,58 @@ func TestSpanReader_findTraceIDsInRangeIncorrectData(t *testing.T) {
    require.NoError(t, err, "an error was not expected when opening a stub database connection")
    defer db.Close()

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    service := "test_service"
    start := time.Unix(0, 0)
    end := time.Now()
    rowValues := []driver.Value{
        "1",
        "incorrect value",
        "3",
    }
    queryResult := sqlmock.NewRows([]string{"traceID"})
    for _, row := range rowValues {
        queryResult.AddRow(row)

    tests := map[string]struct {
        query  string
        args   []driver.Value
        tenant string
    }{
        "default": {
            query: fmt.Sprintf(
                "SELECT DISTINCT traceID FROM %s WHERE service = ? AND timestamp >= ? AND timestamp <= ? ORDER BY service, timestamp DESC LIMIT ?",
                testIndexTable,
            ),
            args: []driver.Value{service, start, end, testNumTraces},
        },
        "tenant": {
            query: fmt.Sprintf(
                "SELECT DISTINCT traceID FROM %s WHERE service = ? AND tenant = ? AND timestamp >= ? AND timestamp <= ? ORDER BY service, timestamp DESC LIMIT ?",
                testIndexTable,
            ),
            args:   []driver.Value{service, testTenant, start, end, testNumTraces},
            tenant: testTenant,
        },
    }

    mock.
        ExpectQuery(fmt.Sprintf(
            "SELECT DISTINCT traceID FROM %s WHERE service = ? AND timestamp >= ? AND timestamp <= ? ORDER BY service, timestamp DESC LIMIT ?",
            testIndexTable,
        )).
        WithArgs(
            service,
            start,
            end,
            testNumTraces,
        ).
        WillReturnRows(queryResult)
    for name, test := range tests {
        t.Run(name, func(t *testing.T) {
            traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, test.tenant, testMaxNumSpans)

            res, err := traceReader.findTraceIDsInRange(
                context.Background(),
                &spanstore.TraceQueryParameters{ServiceName: service, NumTraces: testNumTraces},
                start,
                end,
                make([]model.TraceID, 0))
            assert.Error(t, err)
            assert.Equal(t, []model.TraceID(nil), res)
            assert.NoError(t, mock.ExpectationsWereMet())
            rowValues := []driver.Value{
                "1",
                "incorrect value",
                "3",
            }
            queryResult := sqlmock.NewRows([]string{"traceID"})
            for _, row := range rowValues {
                queryResult.AddRow(row)
            }
            mock.ExpectQuery(test.query).WithArgs(test.args...).WillReturnRows(queryResult)

            res, err := traceReader.findTraceIDsInRange(
                context.Background(),
                &spanstore.TraceQueryParameters{ServiceName: service, NumTraces: testNumTraces},
                start,
                end,
                make([]model.TraceID, 0))
            assert.Error(t, err)
            assert.Equal(t, []model.TraceID(nil), res)
            assert.NoError(t, mock.ExpectationsWereMet())
        })
    }
}

func TestSpanReader_getStrings(t *testing.T) {

@ -1062,7 +1230,7 @@ func TestSpanReader_getStrings(t *testing.T) {
    }
    mock.ExpectQuery(query).WithArgs(argValues...).WillReturnRows(result)

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)

    queryResult, err := traceReader.getStrings(context.Background(), query, args...)
    assert.NoError(t, err)

@ -1080,7 +1248,7 @@ func TestSpanReader_getStringsQueryError(t *testing.T) {
    args := []interface{}{"a"}
    mock.ExpectQuery(query).WithArgs(argValues...).WillReturnError(errorMock)

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)

    queryResult, err := traceReader.getStrings(context.Background(), query, args...)
    assert.EqualError(t, err, errorMock.Error())

@ -1104,7 +1272,7 @@ func TestSpanReader_getStringsRowError(t *testing.T) {
    result.RowError(2, errorMock)
    mock.ExpectQuery(query).WithArgs(argValues...).WillReturnRows(result)

    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, testMaxNumSpans)
    traceReader := NewTraceReader(db, testOperationsTable, testIndexTable, testSpansTable, "", testMaxNumSpans)

    queryResult, err := traceReader.getStrings(context.Background(), query, args...)
    assert.EqualError(t, err, errorMock.Error())
@ -1,15 +1,7 @@
package clickhousespanstore

import (
    "fmt"
)

type TableName string

func (tableName TableName) ToLocal() TableName {
    return tableName + "_local"
}

func (tableName TableName) AddDbName(databaseName string) TableName {
    return TableName(fmt.Sprintf("%s.%s", databaseName, tableName))
}
@ -6,10 +6,6 @@ import (
    "github.com/stretchr/testify/assert"
)

func TestTableName_AddDbName(t *testing.T) {
    assert.Equal(t, TableName("database_name.table_name_local"), TableName("table_name_local").AddDbName("database_name"))
}

func TestTableName_ToLocal(t *testing.T) {
    tableName := TableName("some_table")
    assert.Equal(t, tableName+"_local", tableName.ToLocal())
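The two TableName helpers touched in these hunks compose qualified names: ToLocal appends the _local suffix used by per-shard tables, and AddDbName prefixes the database, exactly as the test expectations show. A self-contained sketch of the same composition — the table and database names in main are illustrative:

package main

import "fmt"

type TableName string

func (tableName TableName) ToLocal() TableName {
    return tableName + "_local"
}

func (tableName TableName) AddDbName(databaseName string) TableName {
    return TableName(fmt.Sprintf("%s.%s", databaseName, tableName))
}

func main() {
    t := TableName("jaeger_spans") // illustrative name
    fmt.Println(t.ToLocal().AddDbName("jaeger")) // jaeger.jaeger_spans_local
}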
@ -9,7 +9,6 @@ import (
    "time"

    "github.com/gogo/protobuf/proto"

    "github.com/jaegertracing/jaeger/model"
)

@ -20,11 +19,9 @@ var delays = []int{2, 3, 5, 8}
// Interval in seconds between attempts changes due to delays slice, then it remains the same as the last value in delays.
type WriteWorker struct {
    // workerID is an arbitrary identifier for keeping track of this worker in logs
    workerID int32

    params *WriteParams
    batch  []*model.Span

    workerID   int32
    params     *WorkerParams
    batch      []*model.Span
    finish     chan bool
    workerDone chan *WriteWorker
    done       sync.WaitGroup

@ -61,7 +58,7 @@ func (worker *WriteWorker) Work() {
    }
}

func (worker *WriteWorker) CLose() {
func (worker *WriteWorker) Close() {
    worker.finish <- true
    worker.done.Wait()
}
|
|||
}
|
||||
}()
|
||||
|
||||
statement, err := tx.Prepare(fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", worker.params.spansTable))
|
||||
var query string
|
||||
if worker.params.tenant == "" {
|
||||
query = fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", worker.params.spansTable)
|
||||
} else {
|
||||
query = fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)", worker.params.spansTable)
|
||||
}
|
||||
|
||||
statement, err := tx.Prepare(query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -127,7 +131,11 @@ func (worker *WriteWorker) writeModelBatch(batch []*model.Span) error {
|
|||
return err
|
||||
}
|
||||
|
||||
_, err = statement.Exec(span.StartTime, span.TraceID.String(), serialized)
|
||||
if worker.params.tenant == "" {
|
||||
_, err = statement.Exec(span.StartTime, span.TraceID.String(), serialized)
|
||||
} else {
|
||||
_, err = statement.Exec(worker.params.tenant, span.StartTime, span.TraceID.String(), serialized)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
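The same branch appears twice on purpose: once to pick the column list at Prepare time, once to bind the matching argument count at Exec time. A self-contained sketch of that pattern against database/sql — the table name is illustrative, not taken from the source:

package example

import (
    "database/sql"
    "time"
)

// insertSpan mirrors the branch above: the tenant column (and its
// leading argument) is added only when a tenant is configured, so
// single-tenant deployments keep the original three-column statement.
func insertSpan(tx *sql.Tx, tenant string, start time.Time, traceID string, model []byte) error {
    query := "INSERT INTO jaeger_spans (timestamp, traceID, model) VALUES (?, ?, ?)"
    if tenant != "" {
        query = "INSERT INTO jaeger_spans (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)"
    }
    stmt, err := tx.Prepare(query)
    if err != nil {
        return err
    }
    defer stmt.Close()

    // Bound arguments must mirror the chosen column list.
    if tenant == "" {
        _, err = stmt.Exec(start, traceID, model)
    } else {
        _, err = stmt.Exec(tenant, start, traceID, model)
    }
    return err
}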
@ -153,11 +161,20 @@ func (worker *WriteWorker) writeIndexBatch(batch []*model.Span) error {
        }
    }()

    statement, err := tx.Prepare(
        fmt.Sprintf(
    var query string
    if worker.params.tenant == "" {
        query = fmt.Sprintf(
            "INSERT INTO %s (timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?)",
            worker.params.indexTable,
        ))
        )
    } else {
        query = fmt.Sprintf(
            "INSERT INTO %s (tenant, timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            worker.params.indexTable,
        )
    }

    statement, err := tx.Prepare(query)
    if err != nil {
        return err
    }
@ -166,15 +183,28 @@ func (worker *WriteWorker) writeIndexBatch(batch []*model.Span) error {

    for _, span := range batch {
        keys, values := uniqueTagsForSpan(span)
        _, err = statement.Exec(
            span.StartTime,
            span.TraceID.String(),
            span.Process.ServiceName,
            span.OperationName,
            span.Duration.Microseconds(),
            keys,
            values,
        )
        if worker.params.tenant == "" {
            _, err = statement.Exec(
                span.StartTime,
                span.TraceID.String(),
                span.Process.ServiceName,
                span.OperationName,
                uint64(span.Duration.Microseconds()),
                keys,
                values,
            )
        } else {
            _, err = statement.Exec(
                worker.params.tenant,
                span.StartTime,
                span.TraceID.String(),
                span.Process.ServiceName,
                span.OperationName,
                uint64(span.Duration.Microseconds()),
                keys,
                values,
            )
        }
        if err != nil {
            return err
        }
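One subtle change rides along in this hunk: the duration argument becomes uint64(span.Duration.Microseconds()). Duration.Microseconds() returns an int64, while the index table DDL above declares durationUs UInt64, so the explicit cast keeps the driver value aligned with the column type. The conversion in isolation:

package example

import "time"

// durationUs returns the unsigned microsecond count expected by the
// ClickHouse UInt64 column; Microseconds() itself yields an int64.
func durationUs(d time.Duration) uint64 {
    return uint64(d.Microseconds())
}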
@ -10,15 +10,12 @@ import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/stretchr/testify/assert"

    "github.com/hashicorp/go-hclog"

    "github.com/DATA-DOG/go-sqlmock"
    sqlmock "github.com/DATA-DOG/go-sqlmock"
    "github.com/gogo/protobuf/proto"
    hclog "github.com/hashicorp/go-hclog"
    "github.com/jaegertracing/jaeger/model"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"
)

@ -29,6 +26,7 @@ const (
    testLogFieldCount = 5
    testIndexTable    = "test_index_table"
    testSpansTable    = "test_spans_table"
    testTenant        = "test_tenant"
)

type expectation struct {
@ -58,7 +56,19 @@ var (
        testSpan.TraceID.String(),
        testSpan.Process.GetServiceName(),
        testSpan.OperationName,
        testSpan.Duration.Microseconds(),
        uint64(testSpan.Duration.Microseconds()),
        keys,
        values,
    }}}
    indexWriteExpectationTenant = expectation{
        preparation: fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", testIndexTable),
        execArgs: [][]driver.Value{{
            testTenant,
            testSpan.StartTime,
            testSpan.TraceID.String(),
            testSpan.Process.GetServiceName(),
            testSpan.OperationName,
            uint64(testSpan.Duration.Microseconds()),
            keys,
            values,
        }}}
@ -135,13 +145,16 @@ func TestSpanWriter_UniqueTagsForSpan(t *testing.T) {
func TestSpanWriter_General(t *testing.T) {
    spanJSON, err := json.Marshal(&testSpan)
    require.NoError(t, err)
    modelWriteExpectationJSON := getModelWriteExpectation(spanJSON)
    modelWriteExpectationJSON := getModelWriteExpectation(spanJSON, "")
    modelWriteExpectationJSONTenant := getModelWriteExpectation(spanJSON, testTenant)
    spanProto, err := proto.Marshal(&testSpan)
    require.NoError(t, err)
    modelWriteExpectationProto := getModelWriteExpectation(spanProto)
    modelWriteExpectationProto := getModelWriteExpectation(spanProto, "")
    modelWriteExpectationProtoTenant := getModelWriteExpectation(spanProto, testTenant)
    tests := map[string]struct {
        encoding     Encoding
        indexTable   TableName
        tenant       string
        spans        []*model.Span
        expectations []expectation
        action       func(writeWorker *WriteWorker, spans []*model.Span) error

@ -154,6 +167,14 @@ func TestSpanWriter_General(t *testing.T) {
            expectations: []expectation{indexWriteExpectation},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeIndexBatch(spans) },
        },
        "write index tenant batch": {
            encoding:     EncodingJSON,
            indexTable:   testIndexTable,
            tenant:       testTenant,
            spans:        testSpans,
            expectations: []expectation{indexWriteExpectationTenant},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeIndexBatch(spans) },
        },
        "write model batch JSON": {
            encoding:   EncodingJSON,
            indexTable: testIndexTable,

@ -161,13 +182,29 @@ func TestSpanWriter_General(t *testing.T) {
            expectations: []expectation{modelWriteExpectationJSON},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
        },
        "write model bach Proto": {
        "write model tenant batch JSON": {
            encoding:     EncodingJSON,
            indexTable:   testIndexTable,
            tenant:       testTenant,
            spans:        testSpans,
            expectations: []expectation{modelWriteExpectationJSONTenant},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
        },
        "write model batch Proto": {
            encoding:     EncodingProto,
            indexTable:   testIndexTable,
            spans:        testSpans,
            expectations: []expectation{modelWriteExpectationProto},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
        },
        "write model tenant batch Proto": {
            encoding:     EncodingProto,
            indexTable:   testIndexTable,
            tenant:       testTenant,
            spans:        testSpans,
            expectations: []expectation{modelWriteExpectationProtoTenant},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
        },
        "write batch no index JSON": {
            encoding:   EncodingJSON,
            indexTable: "",

@ -192,6 +229,15 @@ func TestSpanWriter_General(t *testing.T) {
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
            expectedLogs: writeBatchLogs,
        },
        "write batch tenant JSON": {
            encoding:     EncodingJSON,
            indexTable:   testIndexTable,
            tenant:       testTenant,
            spans:        testSpans,
            expectations: []expectation{modelWriteExpectationJSONTenant, indexWriteExpectationTenant},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
            expectedLogs: writeBatchLogs,
        },
        "write batch Proto": {
            encoding:   EncodingProto,
            indexTable: testIndexTable,

@ -200,6 +246,15 @@ func TestSpanWriter_General(t *testing.T) {
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
            expectedLogs: writeBatchLogs,
        },
        "write batch tenant Proto": {
            encoding:     EncodingProto,
            indexTable:   testIndexTable,
            tenant:       testTenant,
            spans:        testSpans,
            expectations: []expectation{modelWriteExpectationProtoTenant, indexWriteExpectationTenant},
            action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
            expectedLogs: writeBatchLogs,
        },
    }

    for name, test := range tests {
@ -209,7 +264,7 @@ func TestSpanWriter_General(t *testing.T) {
            defer db.Close()

            spyLogger := mocks.NewSpyLogger()
            worker := getWriteWorker(spyLogger, db, test.encoding, test.indexTable)
            worker := getWriteWorker(spyLogger, db, test.encoding, test.indexTable, test.tenant)

            for _, expectation := range test.expectations {
                mock.ExpectBegin()

@ -247,7 +302,7 @@ func TestSpanWriter_BeginError(t *testing.T) {
    defer db.Close()

    spyLogger := mocks.NewSpyLogger()
    writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable)
    writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, "")

    mock.ExpectBegin().WillReturnError(errorMock)

@ -261,10 +316,12 @@ func TestSpanWriter_BeginError(t *testing.T) {
func TestSpanWriter_PrepareError(t *testing.T) {
    spanJSON, err := json.Marshal(&testSpan)
    require.NoError(t, err)
    modelWriteExpectation := getModelWriteExpectation(spanJSON)
    modelWriteExpectation := getModelWriteExpectation(spanJSON, "")
    modelWriteExpectationTenant := getModelWriteExpectation(spanJSON, testTenant)

    tests := map[string]struct {
        action       func(writeWorker *WriteWorker) error
        tenant       string
        expectation  expectation
        expectedLogs []mocks.LogMock
    }{

@ -272,15 +329,31 @@ func TestSpanWriter_PrepareError(t *testing.T) {
            action:      func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) },
            expectation: modelWriteExpectation,
        },
        "write model tenant batch": {
            action:      func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) },
            tenant:      testTenant,
            expectation: modelWriteExpectationTenant,
        },
        "write index batch": {
            action:      func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) },
            expectation: indexWriteExpectation,
        },
        "write index tenant batch": {
            action:      func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) },
            tenant:      testTenant,
            expectation: indexWriteExpectationTenant,
        },
        "write batch": {
            action:       func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
            expectation:  modelWriteExpectation,
            expectedLogs: writeBatchLogs,
        },
        "write tenant batch": {
            action:       func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
            tenant:       testTenant,
            expectation:  modelWriteExpectationTenant,
            expectedLogs: writeBatchLogs,
        },
    }

    for name, test := range tests {

@ -290,7 +363,7 @@ func TestSpanWriter_PrepareError(t *testing.T) {
    defer db.Close()

    spyLogger := mocks.NewSpyLogger()
    spanWriter := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable)
    spanWriter := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, test.tenant)

    mock.ExpectBegin()
    mock.ExpectPrepare(test.expectation.preparation).WillReturnError(errorMock)

@ -306,9 +379,11 @@ func TestSpanWriter_PrepareError(t *testing.T) {
func TestSpanWriter_ExecError(t *testing.T) {
    spanJSON, err := json.Marshal(&testSpan)
    require.NoError(t, err)
    modelWriteExpectation := getModelWriteExpectation(spanJSON)
    modelWriteExpectation := getModelWriteExpectation(spanJSON, "")
    modelWriteExpectationTenant := getModelWriteExpectation(spanJSON, testTenant)
    tests := map[string]struct {
        indexTable   TableName
        tenant       string
        expectations []expectation
        action       func(writer *WriteWorker) error
        expectedLogs []mocks.LogMock

@ -318,11 +393,23 @@ func TestSpanWriter_ExecError(t *testing.T) {
        expectations: []expectation{modelWriteExpectation},
        action:       func(writer *WriteWorker) error { return writer.writeModelBatch(testSpans) },
    },
    "write model tenant batch": {
        indexTable:   testIndexTable,
        tenant:       testTenant,
        expectations: []expectation{modelWriteExpectationTenant},
|
||||
action: func(writer *WriteWorker) error { return writer.writeModelBatch(testSpans) },
|
||||
},
|
||||
"write index batch": {
|
||||
indexTable: testIndexTable,
|
||||
expectations: []expectation{indexWriteExpectation},
|
||||
action: func(writer *WriteWorker) error { return writer.writeIndexBatch(testSpans) },
|
||||
},
|
||||
"write index tenant batch": {
|
||||
indexTable: testIndexTable,
|
||||
tenant: testTenant,
|
||||
expectations: []expectation{indexWriteExpectationTenant},
|
||||
action: func(writer *WriteWorker) error { return writer.writeIndexBatch(testSpans) },
|
||||
},
|
||||
"write batch no index": {
|
||||
indexTable: "",
|
||||
expectations: []expectation{modelWriteExpectation},
|
||||
|
@ -335,6 +422,13 @@ func TestSpanWriter_ExecError(t *testing.T) {
|
|||
action: func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
|
||||
expectedLogs: writeBatchLogs,
|
||||
},
|
||||
"write tenant batch": {
|
||||
indexTable: testIndexTable,
|
||||
tenant: testTenant,
|
||||
expectations: []expectation{modelWriteExpectationTenant, indexWriteExpectationTenant},
|
||||
action: func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
|
||||
expectedLogs: writeBatchLogs,
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
|
@ -344,7 +438,7 @@ func TestSpanWriter_ExecError(t *testing.T) {
|
|||
defer db.Close()
|
||||
|
||||
spyLogger := mocks.NewSpyLogger()
|
||||
writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable)
|
||||
writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, test.tenant)
|
||||
|
||||
for i, expectation := range test.expectations {
|
||||
mock.ExpectBegin()
|
||||
|
@ -367,13 +461,14 @@ func TestSpanWriter_ExecError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func getWriteWorker(spyLogger mocks.SpyLogger, db *sql.DB, encoding Encoding, indexTable TableName) WriteWorker {
|
||||
func getWriteWorker(spyLogger mocks.SpyLogger, db *sql.DB, encoding Encoding, indexTable TableName, tenant string) WriteWorker {
|
||||
return WriteWorker{
|
||||
params: &WriteParams{
|
||||
params: &WorkerParams{
|
||||
logger: spyLogger,
|
||||
db: db,
|
||||
spansTable: testSpansTable,
|
||||
indexTable: indexTable,
|
||||
tenant: tenant,
|
||||
encoding: encoding,
|
||||
},
|
||||
workerDone: make(chan *WriteWorker),
|
||||
|
@ -433,13 +528,25 @@ func generateRandomKeyValues(count int) []model.KeyValue {
|
|||
return tags
|
||||
}
|
||||
|
||||
func getModelWriteExpectation(spanJSON []byte) expectation {
|
||||
return expectation{
|
||||
preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", testSpansTable),
|
||||
execArgs: [][]driver.Value{{
|
||||
testSpan.StartTime,
|
||||
testSpan.TraceID.String(),
|
||||
spanJSON,
|
||||
}},
|
||||
func getModelWriteExpectation(spanJSON []byte, tenant string) expectation {
|
||||
if tenant == "" {
|
||||
return expectation{
|
||||
preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", testSpansTable),
|
||||
execArgs: [][]driver.Value{{
|
||||
testSpan.StartTime,
|
||||
testSpan.TraceID.String(),
|
||||
spanJSON,
|
||||
}},
|
||||
}
|
||||
} else {
|
||||
return expectation{
|
||||
preparation: fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)", testSpansTable),
|
||||
execArgs: [][]driver.Value{{
|
||||
tenant,
|
||||
testSpan.StartTime,
|
||||
testSpan.TraceID.String(),
|
||||
spanJSON,
|
||||
}},
|
||||
}
|
||||
}
|
||||
}
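
For reference, the expectation above pairs with go-sqlmock's prepare/exec matching. Below is a minimal, self-contained sketch (not the plugin's actual test code; the table name and argument values are invented) showing how a tenant-prefixed INSERT is verified, with the tenant as the first bound argument:

package main

import (
	"fmt"

	sqlmock "github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	mock.ExpectBegin()
	// ExpectPrepare matches by regular expression, hence the escaped parentheses.
	prep := mock.ExpectPrepare(`INSERT INTO test_spans_table \(tenant, timestamp, traceID, model\)`)
	prep.ExpectExec().
		WithArgs("test_tenant", sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg()).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	tx, _ := db.Begin()
	stmt, _ := tx.Prepare("INSERT INTO test_spans_table (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)")
	_, _ = stmt.Exec("test_tenant", "2021-01-01 00:00:00", "trace-id", []byte(`{}`))
	_ = tx.Commit()

	fmt.Println("unmet expectations:", mock.ExpectationsWereMet())
}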
|
||||
|
|
|
@ -6,11 +6,10 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
"github.com/jaegertracing/jaeger/storage/spanstore"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type Encoding string
|
||||
|
@ -35,7 +34,7 @@ var (
|
|||
|
||||
// SpanWriter for writing spans to ClickHouse
|
||||
type SpanWriter struct {
|
||||
writeParams WriteParams
|
||||
workerParams WorkerParams
|
||||
|
||||
size int64
|
||||
spans chan *model.Span
|
||||
|
@ -52,17 +51,19 @@ func NewSpanWriter(
|
|||
db *sql.DB,
|
||||
indexTable,
|
||||
spansTable TableName,
|
||||
tenant string,
|
||||
encoding Encoding,
|
||||
delay time.Duration,
|
||||
size int64,
|
||||
maxSpanCount int,
|
||||
) *SpanWriter {
|
||||
writer := &SpanWriter{
|
||||
writeParams: WriteParams{
|
||||
workerParams: WorkerParams{
|
||||
logger: logger,
|
||||
db: db,
|
||||
indexTable: indexTable,
|
||||
spansTable: spansTable,
|
||||
tenant: tenant,
|
||||
encoding: encoding,
|
||||
delay: delay,
|
||||
},
|
||||
|
@ -85,11 +86,11 @@ func (w *SpanWriter) registerMetrics() {
|
|||
}
|
||||
|
||||
func (w *SpanWriter) backgroundWriter(maxSpanCount int) {
|
||||
pool := NewWorkerPool(&w.writeParams, maxSpanCount)
|
||||
pool := NewWorkerPool(&w.workerParams, maxSpanCount)
|
||||
go pool.Work()
|
||||
batch := make([]*model.Span, 0, w.size)
|
||||
|
||||
timer := time.After(w.writeParams.delay)
|
||||
timer := time.After(w.workerParams.delay)
|
||||
last := time.Now()
|
||||
|
||||
for {
|
||||
|
@ -103,20 +104,20 @@ func (w *SpanWriter) backgroundWriter(maxSpanCount int) {
|
|||
batch = append(batch, span)
|
||||
flush = len(batch) == cap(batch)
|
||||
if flush {
|
||||
w.writeParams.logger.Debug("Flush due to batch size", "size", len(batch))
|
||||
w.workerParams.logger.Debug("Flush due to batch size", "size", len(batch))
|
||||
numWritesWithBatchSize.Inc()
|
||||
}
|
||||
case <-timer:
|
||||
timer = time.After(w.writeParams.delay)
|
||||
flush = time.Since(last) > w.writeParams.delay && len(batch) > 0
|
||||
timer = time.After(w.workerParams.delay)
|
||||
flush = time.Since(last) > w.workerParams.delay && len(batch) > 0
|
||||
if flush {
|
||||
w.writeParams.logger.Debug("Flush due to timer")
|
||||
w.workerParams.logger.Debug("Flush due to timer")
|
||||
numWritesWithFlushInterval.Inc()
|
||||
}
|
||||
case <-w.finish:
|
||||
finish = true
|
||||
flush = len(batch) > 0
|
||||
w.writeParams.logger.Debug("Finish channel")
|
||||
w.workerParams.logger.Debug("Finish channel")
|
||||
}
|
||||
|
||||
if flush {
|
||||
|
@ -127,7 +128,7 @@ func (w *SpanWriter) backgroundWriter(maxSpanCount int) {
|
|||
}
|
||||
|
||||
if finish {
|
||||
pool.CLose()
|
||||
pool.Close()
|
||||
}
|
||||
w.done.Done()
|
||||
|
||||
|
|
|
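
Beyond the WriteParams-to-WorkerParams rename, the flush policy itself is unchanged: the background writer accumulates spans and flushes when the batch fills, when the delay elapses with pending spans, or when shutdown is signaled. A standalone sketch of that loop with toy types (illustrative only, not the plugin's code):

package main

import (
	"fmt"
	"time"
)

// batcher mirrors the flush policy: flush on a full batch, on an expired
// timer with pending items, or on the finish signal.
func batcher(items <-chan int, finish <-chan struct{}, size int, delay time.Duration) {
	batch := make([]int, 0, size)
	timer := time.After(delay)
	last := time.Now()
	for {
		var flush, done bool
		select {
		case item := <-items:
			batch = append(batch, item)
			flush = len(batch) == cap(batch) // flush due to batch size
		case <-timer:
			timer = time.After(delay)
			flush = time.Since(last) > delay && len(batch) > 0 // flush due to timer
		case <-finish:
			done = true
			flush = len(batch) > 0 // drain whatever is left
		}
		if flush {
			fmt.Println("flushing", len(batch), "items")
			batch = make([]int, 0, size)
			last = time.Now()
		}
		if done {
			return
		}
	}
}

func main() {
	items := make(chan int)
	finish := make(chan struct{})
	go func() {
		for i := 0; i < 5; i++ {
			items <- i
		}
		close(finish)
	}()
	batcher(items, finish, 3, 100*time.Millisecond)
}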
@ -38,10 +38,17 @@ type Configuration struct {
|
|||
MaxSpanCount int `yaml:"max_span_count"`
|
||||
// Encoding either json or protobuf. Default is json.
|
||||
Encoding EncodingType `yaml:"encoding"`
|
||||
// ClickHouse address e.g. tcp://localhost:9000.
|
||||
// ClickHouse address e.g. localhost:9000.
|
||||
Address string `yaml:"address"`
|
||||
// Directory with .sql files that are run at plugin startup.
|
||||
// Directory with .sql files to run at plugin startup, mainly for integration tests.
|
||||
// Depending on the value of init_tables, this can be run as a
|
||||
// replacement or supplement to creating default tables for span storage.
|
||||
// If init_tables is also enabled, the scripts in this directory will be run first.
|
||||
InitSQLScriptsDir string `yaml:"init_sql_scripts_dir"`
|
||||
// Whether to automatically attempt to create tables in ClickHouse.
|
||||
// By default, this is enabled if init_sql_scripts_dir is empty,
|
||||
// or disabled if init_sql_scripts_dir is provided.
|
||||
InitTables *bool `yaml:"init_tables"`
|
||||
// Indicates location of TLS certificate used to connect to database.
|
||||
CaFile string `yaml:"ca_file"`
|
||||
// Username for connection to database. Default is "default".
|
||||
|
@ -54,6 +61,8 @@ type Configuration struct {
|
|||
MetricsEndpoint string `yaml:"metrics_endpoint"`
|
||||
// Whether to use SQL scripts supporting replication and sharding. Default false.
|
||||
Replication bool `yaml:"replication"`
|
||||
// If non-empty, enables multitenancy in SQL scripts, and assigns the tenant name for this instance.
|
||||
Tenant string `yaml:"tenant"`
|
||||
// Table with spans. Default "jaeger_spans_local" or "jaeger_spans" when replication is enabled.
|
||||
SpansTable clickhousespanstore.TableName `yaml:"spans_table"`
|
||||
// Span index table. Default "jaeger_index_local" or "jaeger_index" when replication is enabled.
|
||||
|
@ -65,6 +74,14 @@ type Configuration struct {
|
|||
TTLDays uint `yaml:"ttl"`
|
||||
// The maximum number of spans to fetch per trace. If 0, no limit is set. Default 0.
|
||||
MaxNumSpans uint `yaml:"max_num_spans"`
|
||||
// The maximum number of open connections to the database. Default is unlimited (see: https://pkg.go.dev/database/sql#DB.SetMaxOpenConns)
|
||||
MaxOpenConns *uint `yaml:"max_open_conns"`
|
||||
// The maximum number of database connections in the idle connection pool. Default 2. (see: https://pkg.go.dev/database/sql#DB.SetMaxIdleConns)
|
||||
MaxIdleConns *uint `yaml:"max_idle_conns"`
|
||||
// The maximum amount of time, in milliseconds, that a database connection may be reused. Default: connections are never closed due to age (see: https://pkg.go.dev/database/sql#DB.SetConnMaxLifetime)
|
||||
ConnMaxLifetimeMillis *uint `yaml:"conn_max_lifetime_millis"`
|
||||
// The maximum amount of time, in milliseconds, that a database connection may be idle. Default: connections are never closed due to idle time (see: https://pkg.go.dev/database/sql#DB.SetConnMaxIdleTime)
|
||||
ConnMaxIdleTimeMillis *uint `yaml:"conn_max_idle_time_millis"`
|
||||
}
|
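
Putting the new options together, a plugin configuration might look like the sketch below. Every value is an invented example (only address is required; the rest fall back to the documented defaults):

address: localhost:9000
tenant: acme                     # non-empty value enables multitenant tables
replication: false
init_tables: true                # explicit; otherwise inferred from init_sql_scripts_dir
max_num_spans: 0                 # 0 = no per-trace limit
max_open_conns: 20
max_idle_conns: 10
conn_max_lifetime_millis: 300000
conn_max_idle_time_millis: 60000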
||||
|
||||
func (cfg *Configuration) setDefaults() {
|
||||
|
@ -80,6 +97,16 @@ func (cfg *Configuration) setDefaults() {
|
|||
if cfg.Encoding == "" {
|
||||
cfg.Encoding = defaultEncoding
|
||||
}
|
||||
if cfg.InitTables == nil {
|
||||
// Decide whether to init tables based on whether a custom script path was provided
|
||||
defaultInitTables := cfg.InitSQLScriptsDir == ""
|
||||
cfg.InitTables = &defaultInitTables
|
||||
}
|
||||
if cfg.Username == "" {
|
||||
cfg.Username = defaultUsername
|
||||
}
|
||||
|
|
|
@ -4,9 +4,9 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
|
||||
)
|
||||
|
||||
func TestSetDefaults(t *testing.T) {
|
||||
|
@ -102,3 +102,27 @@ func TestConfiguration_GetSpansArchiveTable(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfiguration_InitTables(test *testing.T) {
|
||||
// for pointers below
|
||||
t := true
|
||||
f := false
|
||||
tests := map[string]struct {
|
||||
config Configuration
|
||||
expectedInitTables bool
|
||||
}{
|
||||
"scriptsempty_initnil": {config: Configuration{}, expectedInitTables: true},
|
||||
"scriptsprovided_initnil": {config: Configuration{InitSQLScriptsDir: "hello"}, expectedInitTables: false},
|
||||
"scriptsempty_inittrue": {config: Configuration{InitTables: &t}, expectedInitTables: true},
|
||||
"scriptsprovided_inittrue": {config: Configuration{InitSQLScriptsDir: "hello", InitTables: &t}, expectedInitTables: true},
|
||||
"scriptsempty_initfalse": {config: Configuration{InitTables: &f}, expectedInitTables: false},
|
||||
"scriptsprovided_initfalse": {config: Configuration{InitSQLScriptsDir: "hello", InitTables: &f}, expectedInitTables: false},
|
||||
}
|
||||
|
||||
for name, testcase := range tests {
|
||||
test.Run(name, func(t *testing.T) {
|
||||
testcase.config.setDefaults()
|
||||
assert.Equal(t, testcase.expectedInitTables, *(testcase.config.InitTables))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
storage/store.go
|
@ -4,22 +4,22 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"database/sql"
|
||||
"embed"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
jaegerclickhouse "github.com/jaegertracing/jaeger-clickhouse"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
clickhouse "github.com/ClickHouse/clickhouse-go/v2"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
|
||||
"github.com/jaegertracing/jaeger/storage/dependencystore"
|
||||
"github.com/jaegertracing/jaeger/storage/spanstore"
|
||||
|
||||
jaegerclickhouse "github.com/jaegertracing/jaeger-clickhouse"
|
||||
"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousedependencystore"
|
||||
"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
|
||||
)
|
||||
|
@ -32,14 +32,11 @@ type Store struct {
|
|||
archiveReader spanstore.Reader
|
||||
}
|
||||
|
||||
const (
|
||||
tlsConfigKey = "clickhouse_tls_config_key"
|
||||
)
|
||||
|
||||
var (
|
||||
_ shared.StoragePlugin = (*Store)(nil)
|
||||
_ shared.ArchiveStoragePlugin = (*Store)(nil)
|
||||
_ io.Closer = (*Store)(nil)
|
||||
_ shared.StoragePlugin = (*Store)(nil)
|
||||
_ shared.ArchiveStoragePlugin = (*Store)(nil)
|
||||
_ shared.StreamingSpanWriterPlugin = (*Store)(nil)
|
||||
_ io.Closer = (*Store)(nil)
|
||||
)
|
||||
|
||||
func NewStore(logger hclog.Logger, cfg Configuration) (*Store, error) {
|
||||
|
@ -56,60 +53,167 @@ func NewStore(logger hclog.Logger, cfg Configuration) (*Store, error) {
|
|||
if cfg.Replication {
|
||||
return &Store{
|
||||
db: db,
|
||||
writer: clickhousespanstore.NewSpanWriter(logger, db, cfg.SpansIndexTable, cfg.SpansTable,
|
||||
clickhousespanstore.Encoding(cfg.Encoding), cfg.BatchFlushInterval, cfg.BatchWriteSize, cfg.MaxSpanCount),
|
||||
reader: clickhousespanstore.NewTraceReader(db, cfg.OperationsTable, cfg.SpansIndexTable, cfg.SpansTable, cfg.MaxNumSpans),
|
||||
archiveWriter: clickhousespanstore.NewSpanWriter(logger, db, "", cfg.GetSpansArchiveTable(),
|
||||
clickhousespanstore.Encoding(cfg.Encoding), cfg.BatchFlushInterval, cfg.BatchWriteSize, cfg.MaxSpanCount),
|
||||
archiveReader: clickhousespanstore.NewTraceReader(db, "", "", cfg.GetSpansArchiveTable(), cfg.MaxNumSpans),
|
||||
writer: clickhousespanstore.NewSpanWriter(
|
||||
logger,
|
||||
db,
|
||||
cfg.SpansIndexTable,
|
||||
cfg.SpansTable,
|
||||
cfg.Tenant,
|
||||
clickhousespanstore.Encoding(cfg.Encoding),
|
||||
cfg.BatchFlushInterval,
|
||||
cfg.BatchWriteSize,
|
||||
cfg.MaxSpanCount,
|
||||
),
|
||||
reader: clickhousespanstore.NewTraceReader(
|
||||
db,
|
||||
cfg.OperationsTable,
|
||||
cfg.SpansIndexTable,
|
||||
cfg.SpansTable,
|
||||
cfg.Tenant,
|
||||
cfg.MaxNumSpans,
|
||||
),
|
||||
archiveWriter: clickhousespanstore.NewSpanWriter(
|
||||
logger,
|
||||
db,
|
||||
"",
|
||||
cfg.GetSpansArchiveTable(),
|
||||
cfg.Tenant,
|
||||
clickhousespanstore.Encoding(cfg.Encoding),
|
||||
cfg.BatchFlushInterval,
|
||||
cfg.BatchWriteSize,
|
||||
cfg.MaxSpanCount,
|
||||
),
|
||||
archiveReader: clickhousespanstore.NewTraceReader(
|
||||
db,
|
||||
"",
|
||||
"",
|
||||
cfg.GetSpansArchiveTable(),
|
||||
cfg.Tenant,
|
||||
cfg.MaxNumSpans,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
return &Store{
|
||||
db: db,
|
||||
writer: clickhousespanstore.NewSpanWriter(logger, db, cfg.SpansIndexTable, cfg.SpansTable,
|
||||
clickhousespanstore.Encoding(cfg.Encoding), cfg.BatchFlushInterval, cfg.BatchWriteSize, cfg.MaxSpanCount),
|
||||
reader: clickhousespanstore.NewTraceReader(db, cfg.OperationsTable, cfg.SpansIndexTable, cfg.SpansTable, cfg.MaxNumSpans),
|
||||
archiveWriter: clickhousespanstore.NewSpanWriter(logger, db, "", cfg.GetSpansArchiveTable(),
|
||||
clickhousespanstore.Encoding(cfg.Encoding), cfg.BatchFlushInterval, cfg.BatchWriteSize, cfg.MaxSpanCount),
|
||||
archiveReader: clickhousespanstore.NewTraceReader(db, "", "", cfg.GetSpansArchiveTable(), cfg.MaxNumSpans),
|
||||
writer: clickhousespanstore.NewSpanWriter(
|
||||
logger,
|
||||
db,
|
||||
cfg.SpansIndexTable,
|
||||
cfg.SpansTable,
|
||||
cfg.Tenant,
|
||||
clickhousespanstore.Encoding(cfg.Encoding),
|
||||
cfg.BatchFlushInterval,
|
||||
cfg.BatchWriteSize,
|
||||
cfg.MaxSpanCount,
|
||||
),
|
||||
reader: clickhousespanstore.NewTraceReader(
|
||||
db,
|
||||
cfg.OperationsTable,
|
||||
cfg.SpansIndexTable,
|
||||
cfg.SpansTable,
|
||||
cfg.Tenant,
|
||||
cfg.MaxNumSpans,
|
||||
),
|
||||
archiveWriter: clickhousespanstore.NewSpanWriter(
|
||||
logger,
|
||||
db,
|
||||
"",
|
||||
cfg.GetSpansArchiveTable(),
|
||||
cfg.Tenant,
|
||||
clickhousespanstore.Encoding(cfg.Encoding),
|
||||
cfg.BatchFlushInterval,
|
||||
cfg.BatchWriteSize,
|
||||
cfg.MaxSpanCount,
|
||||
),
|
||||
archiveReader: clickhousespanstore.NewTraceReader(
|
||||
db,
|
||||
"",
|
||||
"",
|
||||
cfg.GetSpansArchiveTable(),
|
||||
cfg.Tenant,
|
||||
cfg.MaxNumSpans,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func connector(cfg Configuration) (*sql.DB, error) {
|
||||
params := fmt.Sprintf("%s?database=%s&username=%s&password=%s",
|
||||
cfg.Address,
|
||||
cfg.Database,
|
||||
cfg.Username,
|
||||
cfg.Password,
|
||||
)
|
||||
var conn *sql.DB
|
||||
|
||||
options := clickhouse.Options{
|
||||
Addr: []string{sanitize(cfg.Address)},
|
||||
Auth: clickhouse.Auth{
|
||||
Database: cfg.Database,
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
},
|
||||
Compression: &clickhouse.Compression{
|
||||
Method: clickhouse.CompressionLZ4,
|
||||
},
|
||||
}
|
||||
|
||||
if cfg.CaFile != "" {
|
||||
caCert, err := ioutil.ReadFile(cfg.CaFile)
|
||||
caCert, err := os.ReadFile(cfg.CaFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
err = clickhouse.RegisterTLSConfig(tlsConfigKey, &tls.Config{RootCAs: caCertPool})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
options.TLS = &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
}
|
||||
params += fmt.Sprintf(
|
||||
"&secure=true&tls_config=%s",
|
||||
tlsConfigKey,
|
||||
)
|
||||
}
|
||||
return clickhouseConnector(params)
|
||||
conn = clickhouse.OpenDB(&options)
|
||||
|
||||
if cfg.MaxOpenConns != nil {
|
||||
conn.SetMaxOpenConns(int(*cfg.MaxOpenConns))
|
||||
}
|
||||
if cfg.MaxIdleConns != nil {
|
||||
conn.SetMaxIdleConns(int(*cfg.MaxIdleConns))
|
||||
}
|
||||
if cfg.ConnMaxLifetimeMillis != nil {
|
||||
conn.SetConnMaxLifetime(time.Millisecond * time.Duration(*cfg.ConnMaxLifetimeMillis))
|
||||
}
|
||||
if cfg.ConnMaxIdleTimeMillis != nil {
|
||||
conn.SetConnMaxIdleTime(time.Millisecond * time.Duration(*cfg.ConnMaxIdleTimeMillis))
|
||||
}
|
||||
|
||||
if err := conn.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
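
For context on the migration above: clickhouse-go v2 replaces the DSN-string connector with an Options struct passed to OpenDB, and pool tuning moves to the standard database/sql setters. A standalone sketch (the address and limits are illustrative):

package main

import (
	"fmt"
	"time"

	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	db := clickhouse.OpenDB(&clickhouse.Options{
		Addr: []string{"localhost:9000"},
		Auth: clickhouse.Auth{Database: "default", Username: "default"},
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionLZ4,
		},
	})
	db.SetMaxOpenConns(20)                 // cap concurrent connections
	db.SetMaxIdleConns(10)                 // keep a small idle pool
	db.SetConnMaxLifetime(5 * time.Minute) // recycle aged connections
	if err := db.Ping(); err != nil {
		fmt.Println("ping failed (expected without a running server):", err)
	}
}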
|
||||
|
||||
type tableArgs struct {
|
||||
Database string
|
||||
|
||||
SpansIndexTable clickhousespanstore.TableName
|
||||
SpansTable clickhousespanstore.TableName
|
||||
OperationsTable clickhousespanstore.TableName
|
||||
SpansArchiveTable clickhousespanstore.TableName
|
||||
|
||||
TTLTimestamp string
|
||||
TTLDate string
|
||||
|
||||
Multitenant bool
|
||||
Replication bool
|
||||
}
|
||||
|
||||
type distributedTableArgs struct {
|
||||
Database string
|
||||
Table clickhousespanstore.TableName
|
||||
Hash string
|
||||
}
|
||||
|
||||
func render(templates *template.Template, filename string, args interface{}) string {
|
||||
var statement strings.Builder
|
||||
err := templates.ExecuteTemplate(&statement, filename, args)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return statement.String()
|
||||
}
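
The templates replace the previous numbered .sql files formatted with fmt.Sprintf. A toy standalone example of the same text/template mechanism (the template string below is invented, not one of the plugin's .tmpl.sql files):

package main

import (
	"os"
	"text/template"
)

const ddl = `CREATE TABLE IF NOT EXISTS {{.Database}}.{{.Table}} (...)
ENGINE = {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree{{end}}
`

type args struct {
	Database, Table string
	Replication     bool
}

func main() {
	t := template.Must(template.New("ddl").Parse(ddl))
	// Renders a replicated variant of the local spans table.
	if err := t.Execute(os.Stdout, args{Database: "jaeger", Table: "jaeger_spans_local", Replication: true}); err != nil {
		panic(err)
	}
}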
|
||||
|
||||
func runInitScripts(logger hclog.Logger, db *sql.DB, cfg Configuration) error {
|
||||
var embeddedScripts embed.FS
|
||||
if cfg.Replication {
|
||||
embeddedScripts = jaegerclickhouse.EmbeddedFilesReplication
|
||||
} else {
|
||||
embeddedScripts = jaegerclickhouse.EmbeddedFilesNoReplication
|
||||
}
|
||||
|
||||
var (
|
||||
sqlStatements []string
|
||||
ttlTimestamp string
|
||||
|
@ -119,98 +223,70 @@ func runInitScripts(logger hclog.Logger, db *sql.DB, cfg Configuration) error {
|
|||
ttlTimestamp = fmt.Sprintf("TTL timestamp + INTERVAL %d DAY DELETE", cfg.TTLDays)
|
||||
ttlDate = fmt.Sprintf("TTL date + INTERVAL %d DAY DELETE", cfg.TTLDays)
|
||||
}
|
||||
switch {
|
||||
case cfg.InitSQLScriptsDir != "":
|
||||
if cfg.InitSQLScriptsDir != "" {
|
||||
filePaths, err := walkMatch(cfg.InitSQLScriptsDir, "*.sql")
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not list sql files: %q", err)
|
||||
}
|
||||
sort.Strings(filePaths)
|
||||
for _, f := range filePaths {
|
||||
sqlStatement, err := ioutil.ReadFile(filepath.Clean(f))
|
||||
sqlStatement, err := os.ReadFile(filepath.Clean(f))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, string(sqlStatement))
|
||||
}
|
||||
case cfg.Replication:
|
||||
f, err := embeddedScripts.ReadFile("sqlscripts/replication/0001-jaeger-index-local.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *cfg.InitTables {
|
||||
templates := template.Must(template.ParseFS(jaegerclickhouse.SQLScripts, "sqlscripts/*.tmpl.sql"))
|
||||
|
||||
args := tableArgs{
|
||||
Database: cfg.Database,
|
||||
|
||||
SpansIndexTable: cfg.SpansIndexTable,
|
||||
SpansTable: cfg.SpansTable,
|
||||
OperationsTable: cfg.OperationsTable,
|
||||
SpansArchiveTable: cfg.GetSpansArchiveTable(),
|
||||
|
||||
TTLTimestamp: ttlTimestamp,
|
||||
TTLDate: ttlDate,
|
||||
|
||||
Multitenant: cfg.Tenant != "",
|
||||
Replication: cfg.Replication,
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.SpansIndexTable.ToLocal(), ttlTimestamp))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/replication/0002-jaeger-spans-local.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if cfg.Replication {
|
||||
// Add "_local" to the local table names, and omit it from the distributed tables below
|
||||
args.SpansIndexTable = args.SpansIndexTable.ToLocal()
|
||||
args.SpansTable = args.SpansTable.ToLocal()
|
||||
args.OperationsTable = args.OperationsTable.ToLocal()
|
||||
args.SpansArchiveTable = args.SpansArchiveTable.ToLocal()
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.SpansTable.ToLocal(), ttlTimestamp))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/replication/0003-jaeger-operations-local.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
sqlStatements = append(sqlStatements, render(templates, "jaeger-index.tmpl.sql", args))
|
||||
sqlStatements = append(sqlStatements, render(templates, "jaeger-operations.tmpl.sql", args))
|
||||
sqlStatements = append(sqlStatements, render(templates, "jaeger-spans.tmpl.sql", args))
|
||||
sqlStatements = append(sqlStatements, render(templates, "jaeger-spans-archive.tmpl.sql", args))
|
||||
|
||||
if cfg.Replication {
|
||||
// Now these tables omit the "_local" suffix
|
||||
distargs := distributedTableArgs{
|
||||
Table: cfg.SpansTable,
|
||||
Database: cfg.Database,
|
||||
Hash: "cityHash64(traceID)",
|
||||
}
|
||||
sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))
|
||||
|
||||
distargs.Table = cfg.SpansIndexTable
|
||||
sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))
|
||||
|
||||
distargs.Table = cfg.GetSpansArchiveTable()
|
||||
sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))
|
||||
|
||||
distargs.Table = cfg.OperationsTable
|
||||
distargs.Hash = "rand()"
|
||||
sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.OperationsTable.ToLocal(), ttlDate, cfg.SpansIndexTable.ToLocal().AddDbName(cfg.Database)))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/replication/0004-jaeger-spans-archive-local.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.GetSpansArchiveTable().ToLocal(), ttlTimestamp))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/replication/0005-distributed-city-hash.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(
|
||||
string(f),
|
||||
cfg.SpansTable,
|
||||
cfg.SpansTable.ToLocal().AddDbName(cfg.Database),
|
||||
cfg.Database,
|
||||
cfg.SpansTable.ToLocal(),
|
||||
))
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(
|
||||
string(f),
|
||||
cfg.SpansIndexTable,
|
||||
cfg.SpansIndexTable.ToLocal().AddDbName(cfg.Database),
|
||||
cfg.Database,
|
||||
cfg.SpansIndexTable.ToLocal(),
|
||||
))
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(
|
||||
string(f),
|
||||
cfg.GetSpansArchiveTable(),
|
||||
cfg.GetSpansArchiveTable().ToLocal().AddDbName(cfg.Database),
|
||||
cfg.Database,
|
||||
cfg.GetSpansArchiveTable().ToLocal(),
|
||||
))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/replication/0006-distributed-rand.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(
|
||||
string(f),
|
||||
cfg.OperationsTable,
|
||||
cfg.OperationsTable.ToLocal().AddDbName(cfg.Database),
|
||||
cfg.Database,
|
||||
cfg.OperationsTable.ToLocal(),
|
||||
))
|
||||
default:
|
||||
f, err := embeddedScripts.ReadFile("sqlscripts/local/0001-jaeger-index.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.SpansIndexTable, ttlTimestamp))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/local/0002-jaeger-spans.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.SpansTable, ttlTimestamp))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/local/0003-jaeger-operations.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.OperationsTable, ttlDate, cfg.SpansIndexTable))
|
||||
f, err = embeddedScripts.ReadFile("sqlscripts/local/0004-jaeger-spans-archive.sql")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlStatements = append(sqlStatements, fmt.Sprintf(string(f), cfg.GetSpansArchiveTable(), ttlTimestamp))
|
||||
}
|
||||
return executeScripts(logger, sqlStatements, db)
|
||||
}
|
||||
|
@ -235,21 +311,12 @@ func (s *Store) ArchiveSpanWriter() spanstore.Writer {
|
|||
return s.archiveWriter
|
||||
}
|
||||
|
||||
func (s *Store) Close() error {
|
||||
return s.db.Close()
|
||||
func (s *Store) StreamingSpanWriter() spanstore.Writer {
|
||||
return s.writer
|
||||
}
|
||||
|
||||
func clickhouseConnector(params string) (*sql.DB, error) {
|
||||
db, err := sql.Open("clickhouse", params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db, nil
|
||||
func (s *Store) Close() error {
|
||||
return s.db.Close()
|
||||
}
|
||||
|
||||
func executeScripts(logger hclog.Logger, sqlStatements []string, db *sql.DB) error {
|
||||
|
@ -296,3 +363,10 @@ func walkMatch(root, pattern string) ([]string, error) {
|
|||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// Earlier versions of clickhouse-go expected the address as tcp://host:port,
|
||||
// while newer versions expect it as host:port (without a scheme),
|
||||
// so we strip the tcp:// prefix to maintain backward compatibility.
|
||||
func sanitize(addr string) string {
|
||||
return strings.TrimPrefix(addr, "tcp://")
|
||||
}
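
A quick demonstration of that cleanup, assuming the sanitize helper as defined above:

package main

import (
	"fmt"
	"strings"
)

func sanitize(addr string) string {
	return strings.TrimPrefix(addr, "tcp://")
}

func main() {
	fmt.Println(sanitize("tcp://localhost:9000")) // localhost:9000
	fmt.Println(sanitize("localhost:9000"))       // localhost:9000 (unchanged)
}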
|
||||
|
|
|
@ -5,8 +5,8 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
sqlmock "github.com/DATA-DOG/go-sqlmock"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
|
@ -83,6 +83,7 @@ func newStore(db *sql.DB, logger mocks.SpyLogger) Store {
|
|||
db,
|
||||
testIndexTable,
|
||||
testSpansTable,
|
||||
"",
|
||||
clickhousespanstore.EncodingJSON,
|
||||
0,
|
||||
0,
|
||||
|
@ -93,6 +94,7 @@ func newStore(db *sql.DB, logger mocks.SpyLogger) Store {
|
|||
testOperationsTable,
|
||||
testIndexTable,
|
||||
testSpansTable,
|
||||
"",
|
||||
0,
|
||||
),
|
||||
archiveWriter: clickhousespanstore.NewSpanWriter(
|
||||
|
@ -100,6 +102,7 @@ func newStore(db *sql.DB, logger mocks.SpyLogger) Store {
|
|||
db,
|
||||
testIndexTable,
|
||||
testSpansArchiveTable,
|
||||
"",
|
||||
clickhousespanstore.EncodingJSON,
|
||||
0,
|
||||
0,
|
||||
|
@ -110,6 +113,7 @@ func newStore(db *sql.DB, logger mocks.SpyLogger) Store {
|
|||
testOperationsTable,
|
||||
testIndexTable,
|
||||
testSpansArchiveTable,
|
||||
"",
|
||||
0,
|
||||
),
|
||||
}
|
||||
|
|