Compare commits

98 commits:

a31f41ba2e, 798c568c1e, 309d461899, e307ceed02, b5f0227e4b, 6818bcd0f2, 04d66b9640, 62a81cbb7e, a99be7183c, d591dcf372, 72aa97f41b, 833ff93c57, d305b2e6f1, ee2afb33be, 8cc5cc0bfa, 0f9c6a8413, 34ad2369e9, a79c103b88, 633d9098be, 63dd379962, f47c43c8ab, 9e901791ef, cc0f88c617, 79ac7f846a, a7a9b88bc9, eb41b9bf57, e3e5c1dfc3, ef180789e0, 4172a80936, b98ffb3d29, 96294e9f07, e3a797212f, 18a9734ddc, 4be9854a34, 8428adf537, 531a89a21d, ff2be9b01d, 280c683295, f9ce5929e8, e1f0388828, 6dc63a78f7, 377c5dbd84, 33978b5bd8, ddb3cd760f, 14932caba5, 3cddd04c58, f8b1a94912, 712c4842e0, 8a1942a514, a0e9e9be83, a5216d70b7, ea91aca7a7, 3f94aadc0b, 9749fce675, 01b6024db5, d24c2462f9, f23dabb11e, c90bfe7fbf, fc1cdf57af, e62e165b6a, 84b8235916, 1dfc998e0c, b13bb0baa5, bf2eb2545c, dadd23526a, f393a94c9e, 20e42a6d21, 0cc913e637, 2d415a1a62, c29ac4a85b, 55694dac4f, d67bbb6120, eceb1d269e, 6b89a50f3c, 3f8c8d901c, affc10c1ad, cb22c51ce6, f3f8650063, 675cb1d0f6, 6d4359dcdc, a1b34d228a, 09e7f6ba86, 7a19cafb1c, 6cb93ce20a, 52f58deabf, 04c867dcda, b5358e6b6f, a20afe6045, 99ca644159, 0d62d1e90b, fb46b833fc, 1940f92bcf, 49685ab201, 3c150f77e4, 6ea8852c7c, 4c545fc250, 49bda9ec86, 7c24c2e0a1
@@ -1,4 +1,4 @@
-name: Build and format
+name: Build, test, format and lint

 on:
   push:

@@ -7,7 +7,18 @@ on:
 jobs:
   build-binaries:
     runs-on: ubuntu-latest
-    name: Build binaries
+    name: Build binary for ${{ matrix.platform.name }}
+    strategy:
+      matrix:
+        platform:
+          - name: linux on amd64
+            task: build-linux-amd64
+          - name: linux on arm64
+            task: build-linux-arm64
+          - name: osx on amd64
+            task: build-darwin-amd64
+          - name: osx on arm64
+            task: build-darwin-arm64
     steps:
       - uses: actions/checkout@v2.3.4
         with:

@@ -15,14 +26,14 @@ jobs:

       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.16
+          go-version: ^1.19

       - name: Build binaries
-        run: make build
+        run: make ${{ matrix.platform.task }}

-  format:
+  format-lint:
     runs-on: ubuntu-latest
-    name: Format
+    name: Format and lint
     steps:
       - uses: actions/checkout@v2.3.4
         with:

@@ -30,10 +41,61 @@ jobs:

       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.16
+          go-version: ^1.19

       - name: Install tools
         run: make install-tools

       - name: Format
         run: make fmt && git diff --quiet

+      - name: Lint
+        run: make lint
+
+  e2e-test:
+    runs-on: ubuntu-latest
+    name: E2E Test
+    steps:
+      - uses: actions/checkout@v2.3.4
+        with:
+          submodules: true
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: ^1.19
+
+      - name: Run e2e test
+        run: make e2e-tests
+
+  unit-tests:
+    runs-on: ubuntu-latest
+    name: Unit tests
+    steps:
+      - uses: actions/checkout@v2.3.4
+        with:
+          submodules: true
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: ^1.19
+
+      - name: Run unit test
+        run: make test
+
+  integration-tests:
+    runs-on: ubuntu-latest
+    name: Integration tests
+    steps:
+      - uses: actions/checkout@v2.3.4
+        with:
+          submodules: true
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: ^1.19
+
+      - name: Setup database
+        run: docker run --rm -d -p9000:9000 --name test-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
+
+      - name: Run integration tests
+        run: make integration-test
@@ -15,10 +15,22 @@ jobs:

       - uses: actions/setup-go@v2
         with:
-          go-version: ^1.16
+          go-version: ^1.19

       - name: Create release distribution
-        run: make build tar
+        run: make build-all-platforms tar-all-platforms

+      - name: Log in to the Container registry
+        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build docker image
+        run: |
+          export DOCKER_TAG=${GITHUB_REF##*/}
+          make docker docker-push
+
       - name: Create Github release
         run: |
@@ -11,6 +11,9 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out

+# IDE
+.idea/
+
 # Dependency directories (remove the comment below to include it)
 # vendor/
@@ -0,0 +1,146 @@
# options for analysis running
run:
  # default concurrency is an available CPU number
  concurrency: 4

  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 10m

  # exit code when at least one issue was found, default is 1
  issues-exit-code: 1

  # include test files or not, default is true
  tests: true

  # which dirs to skip: issues from them won't be reported;
  # can use regexp here: generated.*, regexp is applied on full path;
  # default value is empty list, but default dirs are skipped independently
  # from this option's value (see skip-dirs-use-default).
  skip-dirs:

  # default is true. Enables skipping of directories:
  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs-use-default: false

  # which files to skip: they will be analyzed, but issues from them
  # won't be reported. Default value is empty list, but there is
  # no need to include all autogenerated files, we confidently recognize
  # autogenerated files. If it's not please let us know.
  skip-files:

  # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
  # If invoked with -mod=readonly, the go command is disallowed from the implicit
  # automatic updating of go.mod described above. Instead, it fails when any changes
  # to go.mod are needed. This setting is most useful to check that go.mod does
  # not need updates, such as in a continuous integration and testing system.
  # If invoked with -mod=vendor, the go command assumes that the vendor
  # directory holds the correct copies of dependencies and ignores
  # the dependency descriptions in go.mod.
  modules-download-mode: readonly

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
  format: colored-line-number

  # print lines of code with issue, default is true
  print-issued-lines: true

  # print linter name in the end of issue text, default is true
  print-linter-name: true

# all available settings of specific linters
linters-settings:
  govet:
    # report about shadowed variables
    check-shadowing: true

    # settings per analyzer
    settings:
      printf: # analyzer name, run `go tool vet help` to see all analyzers
        funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf

    enable-all: true
    # TODO: Enable this and fix the alignment issues.
    disable:
      - fieldalignment

  revive:
    # minimal confidence for issues, default is 0.8
    min-confidence: 0.8

  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true

  goimports:
    # put imports beginning with prefix after 3rd-party packages;
    # it's a comma-separated list of prefixes
    local-prefixes: go.opentelemetry.io/collector

  misspell:
    # Correct spellings using locale preferences for US or UK.
    # Default is to use a neutral variety of English.
    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
    locale: US
    ignore-words:
      - cancelled
      - metre
      - meter
      - metres
      - kilometre
      - kilometres

linters:
  disable:
    - errcheck
  enable:
    - exportloopref
    - gocritic
    - gofmt
    - goimports
    - gosec
    - govet
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unparam

issues:
  # Excluding configuration per-path, per-linter, per-text and per-source
  exclude-rules:
    # Exclude some linters from running on tests files.
    - path: otlp_test.go
      linters:
        # See https://github.com/golangci/golangci-lint/issues/537#issuecomment-545170007
        - structcheck
    - text: "G404:"
      linters:
        - gosec
    - text: "G402:"
      linters:
        - gosec
    - path: grpc_test.go
      linters:
        # See https://github.com/golangci/golangci-lint/issues/2286
        - typecheck

  # The list of ids of default excludes to include or disable. By default it's empty.
  # See the list of default excludes here https://golangci-lint.run/usage/configuration.
  include:
    - EXC0001
    - EXC0002
    - EXC0003
    - EXC0004
    - EXC0005
    - EXC0006
    - EXC0007
    # - EXC0008 - Duplicated errcheck checks
    - EXC0009
    - EXC0010
    - EXC0011
@@ -0,0 +1,8 @@
FROM docker.io/library/alpine:3.16

ADD jaeger-clickhouse-linux-amd64 /go/bin/jaeger-clickhouse

RUN mkdir /plugin

# /plugin/ location is defined in jaeger-operator
CMD ["cp", "/go/bin/jaeger-clickhouse", "/plugin/jaeger-clickhouse"]
82 Makefile

@@ -1,28 +1,102 @@
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOBUILD=CGO_ENABLED=0 installsuffix=cgo go build -trimpath

TOOLS_MOD_DIR = ./internal/tools
JAEGER_VERSION ?= 1.32.0

JAEGER_ALL_IN_ONE ?= ${HOME}/projects/jaegertracing/jaeger/cmd/all-in-one/all-in-one-linux-amd64
DOCKER_REPO ?= ghcr.io/jaegertracing/jaeger-clickhouse
DOCKER_TAG ?= latest

.PHONY: build
build:
-	go build -o jaeger-clickhouse-$(GOOS)-$(GOARCH) ./cmd/jaeger-clickhouse/main.go
+	${GOBUILD} -o jaeger-clickhouse-$(GOOS)-$(GOARCH) ./cmd/jaeger-clickhouse/main.go

.PHONY: build-linux-amd64
build-linux-amd64:
	GOOS=linux GOARCH=amd64 $(MAKE) build

.PHONY: build-linux-arm64
build-linux-arm64:
	GOOS=linux GOARCH=arm64 $(MAKE) build

.PHONY: build-darwin-amd64
build-darwin-amd64:
	GOOS=darwin GOARCH=amd64 $(MAKE) build

.PHONY: build-darwin-arm64
build-darwin-arm64:
	GOOS=darwin GOARCH=arm64 $(MAKE) build

.PHONY: build-all-platforms
build-all-platforms: build-linux-amd64 build-linux-arm64 build-darwin-amd64 build-darwin-arm64

.PHONY: e2e-tests
e2e-tests:
	GOOS=linux GOARCH=amd64 $(MAKE) build
	E2E_TEST=true go test ./e2etests... -v

.PHONY: run
run:
-	SPAN_STORAGE_TYPE=grpc-plugin ${JAEGER_ALL_IN_ONE} --grpc-storage-plugin.binary=./jaeger-clickhouse-$(GOOS)-$(GOARCH) --grpc-storage-plugin.configuration-file=./config.yaml
+	docker run --rm --name jaeger -e JAEGER_DISABLED=false --link some-clickhouse-server -it -u ${shell id -u} -p16686:16686 -p14250:14250 -p14268:14268 -p6831:6831/udp -v "${PWD}:/data" -e SPAN_STORAGE_TYPE=grpc-plugin jaegertracing/all-in-one:${JAEGER_VERSION} --query.ui-config=/data/jaeger-ui.json --grpc-storage-plugin.binary=/data/jaeger-clickhouse-$(GOOS)-$(GOARCH) --grpc-storage-plugin.configuration-file=/data/config.yaml --grpc-storage-plugin.log-level=debug

.PHONY: run-hotrod
run-hotrod:
	docker run --rm --link jaeger --env JAEGER_AGENT_HOST=jaeger --env JAEGER_AGENT_PORT=6831 -p8080:8080 jaegertracing/example-hotrod:${JAEGER_VERSION} all

.PHONY: fmt
fmt:
	go fmt ./...
-	goimports -w -local github.com/pavolloffay/jaeger-clickhouse ./
+	goimports -w -local github.com/jaegertracing/jaeger-clickhouse ./

.PHONY: lint
lint:
	golangci-lint -v run --allow-parallel-runners ./...

.PHONY: test
test:
	go test ./...

.PHONY: integration-test
integration-test: build
	STORAGE=grpc-plugin \
	PLUGIN_BINARY_PATH=$(PWD)/jaeger-clickhouse-linux-amd64 \
	PLUGIN_CONFIG_PATH=$(PWD)/integration/config-local.yaml \
	go test ./integration

.PHONY: tar
tar:
	tar -czvf jaeger-clickhouse-$(GOOS)-$(GOARCH).tar.gz jaeger-clickhouse-$(GOOS)-$(GOARCH) config.yaml

.PHONY: tar-linux-amd64
tar-linux-amd64:
	GOOS=linux GOARCH=amd64 $(MAKE) tar

.PHONY: tar-linux-arm64
tar-linux-arm64:
	GOOS=linux GOARCH=arm64 $(MAKE) tar

.PHONY: tar-darwin-amd64
tar-darwin-amd64:
	GOOS=darwin GOARCH=amd64 $(MAKE) tar

.PHONY: tar-darwin-arm64
tar-darwin-arm64:
	GOOS=darwin GOARCH=arm64 $(MAKE) tar

.PHONY: tar-all-platforms
tar-all-platforms: tar-linux-amd64 tar-linux-arm64 tar-darwin-amd64 tar-darwin-arm64

.PHONY: docker
docker: build
	docker build -t ${DOCKER_REPO}:${DOCKER_TAG} -f Dockerfile .

.PHONY: docker-push
docker-push: build
	docker push ${DOCKER_REPO}:${DOCKER_TAG}

.PHONY: install-tools
install-tools:
	cd $(TOOLS_MOD_DIR) && go install golang.org/x/tools/cmd/goimports
	cd $(TOOLS_MOD_DIR) && go install github.com/golangci/golangci-lint/cmd/golangci-lint
67 README.md

@@ -1,22 +1,65 @@
-# Jaeger ClickHouse
+# Jaeger ClickHouse (experimental)

-Jaeger ClickHouse gRPC [storage plugin](https://github.com/jaegertracing/jaeger/tree/master/plugin/storage/grpc).
+⚠️ This module only implements the grpc-plugin API that has been deprecated in Jaeger (https://github.com/jaegertracing/jaeger/issues/4647).

-This is WIP and it is based on https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse.
-See as well [jaegertracing/jaeger/issues/1438](https://github.com/jaegertracing/jaeger/issues/1438) for ClickHouse plugin.
+This is a [Jaeger gRPC storage plugin](https://github.com/jaegertracing/jaeger/tree/master/plugin/storage/grpc) implementation for storing traces in ClickHouse.

-## Build
+## Project status

This is a community-driven project, and we would love to hear your issues and feature requests.
Pull requests are also greatly appreciated.

## Why use ClickHouse for Jaeger?

[ClickHouse](https://clickhouse.com) is an analytical column-oriented database management system.
It is designed to analyze streams of events, which somewhat resemble spans.
It's open-source, optimized for performance, and actively developed.

## How it works

Jaeger spans are stored in 2 tables. The first contains the whole span encoded either in JSON or Protobuf.
The second stores key information about spans for searching. This table is indexed by span duration and tags.
Also, info about operations is stored in a materialized view. There are no indexes for archived spans.
Storing data in replicated local tables with distributed global tables is natively supported. Spans are buffered.
Span buffers are flushed to the DB either by timer or after reaching the max batch size. The timer interval and batch size can be
set in the [config file](./config.yaml).

Database schema generated by JetBrains DataGrip:

# How to start using Jaeger over ClickHouse

## Documentation

Refer to the [config.yaml](./config.yaml) for all supported configuration options.

* [Kubernetes deployment](./guide-kubernetes.md)
* [Sharding and replication](./guide-sharding-and-replication.md)
* [Multi-tenancy](./guide-multitenancy.md)

## Build & Run

### Docker database example

```bash
docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
GOOS=linux make build run
make run-hotrod
```

Open [localhost:16686](http://localhost:16686) and [localhost:8080](http://localhost:8080).

### Custom database

You need to specify connection options in `config.yaml`, then you can run

```bash
make build
SPAN_STORAGE_TYPE=grpc-plugin {Jaeger binary address} --query.ui-config=jaeger-ui.json --grpc-storage-plugin.binary=./{name of built binary} --grpc-storage-plugin.configuration-file=config.yaml --grpc-storage-plugin.log-level=debug
```

-## Run
+## Credits

-```bash
-docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:21
-# download Jaeger all-in-one
-JAEGER_ALL_IN_ONE=<path to all-in-one> make run
-```
+This project is originally based on [this clickhouse plugin implementation](https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse).

-After each Jaeger run the ClickHouse server has to be restarted or changed `sql_scripts_dir` to a directory with no SQL scripts.
+See also [jaegertracing/jaeger/issues/1438](https://github.com/jaegertracing/jaeger/issues/1438) for historical discussion regarding the implementation of a ClickHouse plugin.
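The buffering behavior the README describes above (flush on a timer or when the batch fills, whichever comes first) is a classic pattern. A minimal Go sketch of the idea follows; all names here are illustrative and not taken from the plugin's source:

```go
package main

import (
	"fmt"
	"time"
)

// span stands in for a Jaeger span; the real plugin stores encoded model.Span values.
type span struct{ traceID string }

// batchWriter buffers spans and flushes either when the batch is full or when
// the flush interval elapses, mirroring the batch_write_size and
// batch_flush_interval options in config.yaml.
func batchWriter(spans <-chan span, batchSize int, interval time.Duration) {
	buf := make([]span, 0, batchSize)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	flush := func() {
		if len(buf) == 0 {
			return
		}
		// The real plugin writes the batch to ClickHouse here.
		fmt.Printf("flushing %d spans\n", len(buf))
		buf = buf[:0]
	}

	for {
		select {
		case s, ok := <-spans:
			if !ok {
				flush() // drain whatever is left on shutdown
				return
			}
			buf = append(buf, s)
			if len(buf) >= batchSize {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	ch := make(chan span)
	go func() {
		for i := 0; i < 25; i++ {
			ch <- span{traceID: fmt.Sprintf("%04x", i)}
		}
		close(ch)
	}()
	batchWriter(ch, 10, 5*time.Second)
}
```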
@@ -0,0 +1,96 @@
# Jaeger ClickHouse

This is an implementation of Jaeger's [storage plugin](https://github.com/jaegertracing/jaeger/tree/master/plugin/storage/grpc) for ClickHouse.
See as well [jaegertracing/jaeger/issues/1438](https://github.com/jaegertracing/jaeger/issues/1438) for historical discussion regarding the Clickhouse plugin.

## Project status

Jaeger ClickHouse is a community-driven project, and we would love to hear your feature requests.
Pull requests are also greatly appreciated.

## Why use ClickHouse for Jaeger?

[ClickHouse](https://github.com/clickhouse/clickhouse) is an analytical column-oriented database management system. It is designed to analyze streams of clicks, which somewhat resemble spans. It's open-source, optimized for performance, and actively developed.

## How does it work?

Jaeger spans are stored in 2 tables. The first one contains the whole span encoded either in JSON or Protobuf.
The second stores key information about spans for searching. This table is indexed by span duration and tags.
Also, info about operations is stored in a materialized view. There are no indexes for archived spans.
Storing data in replicated local tables with distributed global tables is natively supported. Spans are buffered.
Span buffers are flushed to the DB either by timer or after reaching the max batch size.
The timer interval and batch size can be set in the [config file](../config.yaml).

## Benchmarks

10^8 traces were flushed using [jaeger-tracegen](https://www.jaegertracing.io/docs/1.25/tools/) to Clickhouse and ElasticSearch servers.
The Clickhouse server consisted of 3 shards, 2 hosts in each, and 3 Zookeeper hosts. The Elasticsearch server consisted of 6 hosts,
with 5 shards for the primary index and 1 replica. All hosts were equal (8 vCPU, 32 GiB RAM, 20 GiB SSD).

### General stats

Cpu usage, [% of 1 host CPU]

Memory usage, [bytes]

IO write, [operations]

Disk usage, [bytes]

### Recorded

#### ClickHouse

```sql
SELECT count()
FROM jaeger_index
WHERE service = 'tracegen'

┌──count()─┐
│ 57026426 │
└──────────┘
```

#### Elasticsearch

# How to start using Jaeger over ClickHouse

## Documentation

Refer to the [config.yaml](../config.yaml) for all supported configuration options.

* [Kubernetes deployment](../guide-kubernetes.md)
* [Sharding and replication](../guide-sharding-and-replication.md)
* [Multi-tenancy](../guide-multitenancy.md)

## Build & Run

### Docker database example

```bash
docker run --rm -it -p9000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:22
GOOS=linux make build run
make run-hotrod
```

Open [localhost:16686](http://localhost:16686) and [localhost:8080](http://localhost:8080).

### Custom database

You need to specify connection options in the config.yaml file, then you can run

```bash
make build
SPAN_STORAGE_TYPE=grpc-plugin {Jaeger binary address} --query.ui-config=jaeger-ui.json --grpc-storage-plugin.binary=./{name of built binary} --grpc-storage-plugin.configuration-file=config.yaml --grpc-storage-plugin.log-level=debug
```

## Credits

This project is based on https://github.com/bobrik/jaeger/tree/ivan/clickhouse/plugin/storage/clickhouse.
After Width: | Height: | Size: 526 KiB |
After Width: | Height: | Size: 461 KiB |
After Width: | Height: | Size: 37 KiB |
After Width: | Height: | Size: 377 KiB |
After Width: | Height: | Size: 307 KiB |
After Width: | Height: | Size: 127 KiB |
@@ -2,16 +2,20 @@ package main

 import (
 	"flag"
-	"io/ioutil"
+	"net/http"
 	"os"
+	"path/filepath"

-	"github.com/hashicorp/go-hclog"
+	// Package contains time zone info for connecting to ClickHouse servers with non-UTC time zone
+	_ "time/tzdata"
+
+	hclog "github.com/hashicorp/go-hclog"
 	"github.com/jaegertracing/jaeger/plugin/storage/grpc"
 	"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
-	"gopkg.in/yaml.v3"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	yaml "gopkg.in/yaml.v3"

-	jaegerclickhouse "github.com/pavolloffay/jaeger-clickhouse"
-	"github.com/pavolloffay/jaeger-clickhouse/storage"
+	"github.com/jaegertracing/jaeger-clickhouse/storage"
 )

 func main() {

@@ -20,31 +24,44 @@ func main() {
 	flag.Parse()

 	logger := hclog.New(&hclog.LoggerOptions{
-		Name:  "jaeger-clickhouse",
-		Level: hclog.Warn, // Jaeger only captures >= Warn, so don't bother logging below Warn
+		Name: "jaeger-clickhouse",
+		// If this is set to e.g. Warn, the debug logs are never sent to Jaeger even despite
+		// --grpc-storage-plugin.log-level=debug
+		Level:      hclog.Trace,
 		JSONFormat: true,
 	})

-	cfgFile, err := ioutil.ReadFile(configPath)
+	cfgFile, err := os.ReadFile(filepath.Clean(configPath))
 	if err != nil {
-		logger.Error("Could not read config file: %q: %q", configPath, err)
+		logger.Error("Could not read config file", "config", configPath, "error", err)
 		os.Exit(1)
 	}
 	var cfg storage.Configuration
 	err = yaml.Unmarshal(cfgFile, &cfg)
 	if err != nil {
-		logger.Error("Could not parse config file: %q", err)
+		logger.Error("Could not parse config file", "error", err)
 	}

-	var store shared.PluginServices
-	s, err := storage.NewStore(logger, cfg, jaegerclickhouse.EmbeddedFiles)
+	go func() {
+		http.Handle("/metrics", promhttp.Handler())
+		err = http.ListenAndServe(cfg.MetricsEndpoint, nil)
+		if err != nil {
+			logger.Error("Failed to listen for metrics endpoint", "error", err)
+		}
+	}()
+
+	var pluginServices shared.PluginServices
+	store, err := storage.NewStore(logger, cfg)
 	if err != nil {
-		logger.Error("Failed to crate storage", err)
+		logger.Error("Failed to create a storage", err)
 		os.Exit(1)
 	}
-	store.Store = s
+	pluginServices.Store = store
+	pluginServices.ArchiveStore = store
+	pluginServices.StreamingSpanWriter = store

-	grpc.Serve(&store)
-	if err = s.Close(); err != nil {
+	grpc.Serve(&pluginServices)
+	if err = store.Close(); err != nil {
 		logger.Error("Failed to close store", "error", err)
 		os.Exit(1)
 	}
50 config.yaml

@@ -1,3 +1,49 @@
-address: tcp://localhost:9000
-# When empty the embedded scripts from sqlscripts directory are used
+address: some-clickhouse-server:9000
# Directory with .sql files to run at plugin startup, mainly for integration tests.
# Depending on the value of "init_tables", this can be run as a
# replacement or supplement to creating default tables for span storage.
# If init_tables is also enabled, the scripts in this directory will be run first.
init_sql_scripts_dir:
# Whether to automatically attempt to create tables in ClickHouse.
# By default, this is enabled if init_sql_scripts_dir is empty,
# or disabled if init_sql_scripts_dir is provided.
init_tables:
# Maximal amount of spans that can be pending writes at a time.
# New spans exceeding this limit will be discarded,
# keeping memory in check if there are issues writing to ClickHouse.
# Check the "jaeger_clickhouse_discarded_spans" metric to keep track of discards.
# If 0, no limit is set. Default 10_000_000.
max_span_count:
# Batch write size. Default 10_000.
batch_write_size:
# Batch flush interval. Default 5s.
batch_flush_interval:
# Encoding of stored data. Either json or protobuf. Default json.
encoding:
# Path to CA TLS certificate.
ca_file:
# Username for connection to ClickHouse. Default is "default".
username:
# Password for connection to ClickHouse.
password:
# ClickHouse database name. The database must be created manually before Jaeger starts. Default is "default".
database:
# If non-empty, enables a tenant column in tables, and uses the provided tenant name for this instance.
# Default is empty. See guide-multitenancy.md for more information.
tenant:
# Endpoint for serving prometheus metrics. Default localhost:9090.
metrics_endpoint: localhost:9090
# Whether to use sql scripts supporting replication and sharding.
# Replication can be used only on database with Atomic engine.
# Default false.
replication:
# Table with spans. Default "jaeger_spans_local" or "jaeger_spans" when replication is enabled.
spans_table:
# Span index table. Default "jaeger_index_local" or "jaeger_index" when replication is enabled.
spans_index_table:
# Operations table. Default "jaeger_operations_local" or "jaeger_operations" when replication is enabled.
operations_table:
# TTL for data in tables in days. If 0, no TTL is set. Default 0.
ttl:
# The maximum number of spans to fetch per trace. If 0, no limit is set. Default 0.
max_num_spans:
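Loading this file follows the same pattern the main.go diff above shows: read the YAML and unmarshal it into the plugin's storage.Configuration struct. A standalone sketch of that pattern; the Config struct here is an illustrative subset, not the plugin's full struct:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

// Config mirrors a few of the options documented above.
type Config struct {
	Address         string `yaml:"address"`
	BatchWriteSize  int    `yaml:"batch_write_size"`
	MetricsEndpoint string `yaml:"metrics_endpoint"`
	Replication     bool   `yaml:"replication"`
}

func main() {
	// filepath.Clean matches what the plugin does before reading the config path.
	raw, err := os.ReadFile(filepath.Clean("config.yaml"))
	if err != nil {
		fmt.Fprintln(os.Stderr, "read config:", err)
		os.Exit(1)
	}
	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		fmt.Fprintln(os.Stderr, "parse config:", err)
		os.Exit(1)
	}
	fmt.Printf("connecting to %s (replication=%v)\n", cfg.Address, cfg.Replication)
}
```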
@@ -0,0 +1,47 @@
<!-- Minimal configuration to enable cluster mode in a single clickhouse process -->
<yandex>
  <macros>
    <installation>cluster</installation>
    <all-sharded-shard>0</all-sharded-shard>
    <cluster>cluster</cluster>
    <shard>0</shard>
    <replica>cluster-0-0</replica>
  </macros>

  <remote_servers>
    <cluster>
      <shard>
        <internal_replication>true</internal_replication>
        <replica>
          <host>localhost</host>
          <port>9000</port>
        </replica>
      </shard>
    </cluster>
  </remote_servers>

  <keeper_server>
    <tcp_port>2181</tcp_port>
    <server_id>0</server_id>
    <log_storage_path>/var/log/clickhouse-server/coordination/log</log_storage_path>
    <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
    <raft_configuration>
      <server>
        <id>0</id>
        <hostname>localhost</hostname>
        <port>9444</port>
      </server>
    </raft_configuration>
  </keeper_server>

  <zookeeper>
    <!-- Clickhouse Keeper -->
    <node>
      <host>localhost</host>
      <port>2181</port>
    </node>
  </zookeeper>
  <distributed_ddl>
    <path>/clickhouse/cluster/task_queue/ddl</path>
  </distributed_ddl>
</yandex>
@@ -0,0 +1,4 @@
address: chi:9000
tenant: multi1
# For test purposes flush on every write
batch_write_size: 1

@@ -0,0 +1,4 @@
address: chi:9000
tenant: multi2
# For test purposes flush on every write
batch_write_size: 1

@@ -0,0 +1,3 @@
address: chi:9000
# For test purposes flush on every write
batch_write_size: 1

@@ -0,0 +1,5 @@
address: chi:9000
replication: true
tenant: multi1
# For test purposes flush on every write
batch_write_size: 1

@@ -0,0 +1,5 @@
address: chi:9000
replication: true
tenant: multi2
# For test purposes flush on every write
batch_write_size: 1

@@ -0,0 +1,4 @@
address: chi:9000
replication: true
# For test purposes flush on every write
batch_write_size: 1
@@ -0,0 +1,203 @@
package e2etests

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"testing"
	"time"

	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ecodia/golang-awaitility/awaitility"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	testcontainers "github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

const (
	clickHouseImage = "clickhouse/clickhouse-server:22"
	jaegerImage     = "jaegertracing/all-in-one:1.32.0"

	networkName     = "chi-jaeger-test"
	clickhousePort  = "9000/tcp"
	jaegerQueryPort = "16686/tcp"
	jaegerAdminPort = "14269/tcp"
)

type testCase struct {
	configs []string
	chiconf *string
}

func TestE2E(t *testing.T) {
	if os.Getenv("E2E_TEST") == "" {
		t.Skip("Set E2E_TEST=true to run the test")
	}

	// Minimal additional configuration (config.d) to enable cluster mode
	chireplconf := "clickhouse-replicated.xml"

	tests := map[string]testCase{
		"local-single": {
			configs: []string{"config-local-single.yaml"},
			chiconf: nil,
		},
		"local-multi": {
			configs: []string{"config-local-multi1.yaml", "config-local-multi2.yaml"},
			chiconf: nil,
		},
		"replication-single": {
			configs: []string{"config-replication-single.yaml"},
			chiconf: &chireplconf,
		},
		"replication-multi": {
			configs: []string{"config-replication-multi1.yaml", "config-replication-multi2.yaml"},
			chiconf: &chireplconf,
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			testE2E(t, test)
		})
	}
}

func testE2E(t *testing.T, test testCase) {
	ctx := context.Background()
	workingDir, err := os.Getwd()
	require.NoError(t, err)

	network, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{
		NetworkRequest: testcontainers.NetworkRequest{Name: networkName},
	})
	require.NoError(t, err)
	defer network.Remove(ctx)

	var bindMounts map[string]string
	if test.chiconf != nil {
		bindMounts = map[string]string{
			fmt.Sprintf("%s/%s", workingDir, *test.chiconf): "/etc/clickhouse-server/config.d/testconf.xml",
		}
	} else {
		bindMounts = map[string]string{}
	}
	chReq := testcontainers.ContainerRequest{
		Image:        clickHouseImage,
		ExposedPorts: []string{clickhousePort},
		WaitingFor:   &clickhouseWaitStrategy{test: t, pollInterval: time.Millisecond * 200, startupTimeout: time.Minute},
		Networks:     []string{networkName},
		Hostname:     "chi",
		BindMounts:   bindMounts,
	}
	chContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: chReq,
		Started:          true,
	})
	require.NoError(t, err)
	defer chContainer.Terminate(ctx)

	jaegerContainers := make([]testcontainers.Container, 0)
	for _, pluginConfig := range test.configs {
		jaegerReq := testcontainers.ContainerRequest{
			Image:        jaegerImage,
			ExposedPorts: []string{jaegerQueryPort, jaegerAdminPort},
			WaitingFor:   wait.ForHTTP("/").WithPort(jaegerAdminPort).WithStartupTimeout(time.Second * 10),
			Env: map[string]string{
				"SPAN_STORAGE_TYPE": "grpc-plugin",
			},
			Cmd: []string{
				"--grpc-storage-plugin.binary=/project-dir/jaeger-clickhouse-linux-amd64",
				fmt.Sprintf("--grpc-storage-plugin.configuration-file=/project-dir/e2etests/%s", pluginConfig),
				"--grpc-storage-plugin.log-level=debug",
			},
			BindMounts: map[string]string{
				workingDir + "/..": "/project-dir",
			},
			Networks: []string{networkName},
		}
		// Call Start() manually here so that if it fails then we can still access the logs.
		jaegerContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
			ContainerRequest: jaegerReq,
		})
		require.NoError(t, err)
		defer func() {
			logs, errLogs := jaegerContainer.Logs(ctx)
			require.NoError(t, errLogs)
			all, errLogs := ioutil.ReadAll(logs)
			require.NoError(t, errLogs)
			fmt.Printf("Jaeger logs:\n---->\n%s<----\n\n", string(all))
			jaegerContainer.Terminate(ctx)
		}()
		err = jaegerContainer.Start(ctx)
		require.NoError(t, err)

		jaegerContainers = append(jaegerContainers, jaegerContainer)
	}

	for _, jaegerContainer := range jaegerContainers {
		jaegerQueryPort, err := jaegerContainer.MappedPort(ctx, jaegerQueryPort)
		require.NoError(t, err)

		err = awaitility.Await(100*time.Millisecond, time.Second*3, func() bool {
			// Jaeger traces itself so this request generates some spans
			response, errHTTP := http.Get(fmt.Sprintf("http://localhost:%d/api/services", jaegerQueryPort.Int()))
			require.NoError(t, errHTTP)
			body, errHTTP := ioutil.ReadAll(response.Body)
			require.NoError(t, errHTTP)
			var r result
			errHTTP = json.Unmarshal(body, &r)
			require.NoError(t, errHTTP)
			return len(r.Data) == 1 && r.Data[0] == "jaeger-query"
		})
		assert.NoError(t, err)
	}
}

type result struct {
	Data []string `json:"data"`
}

type clickhouseWaitStrategy struct {
	test           *testing.T
	pollInterval   time.Duration
	startupTimeout time.Duration
}

var _ wait.Strategy = (*clickhouseWaitStrategy)(nil)

func (c *clickhouseWaitStrategy) WaitUntilReady(ctx context.Context, target wait.StrategyTarget) error {
	ctx, cancelContext := context.WithTimeout(ctx, c.startupTimeout)
	defer cancelContext()

	port, err := target.MappedPort(ctx, clickhousePort)
	require.NoError(c.test, err)

	db := clickhouse.OpenDB(&clickhouse.Options{
		Addr: []string{
			fmt.Sprintf("localhost:%d", port.Int()),
		},
		Auth: clickhouse.Auth{
			Database: "default",
		},
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionLZ4,
		},
	})
	require.NoError(c.test, err)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(c.pollInterval):
			if err := db.Ping(); err != nil {
				continue
			}
			return nil
		}
	}
}
4 embed.go

@@ -1,6 +1,6 @@
-package jaeger_clickhouse
+package jaegerclickhouse

 import "embed"

 //go:embed sqlscripts/*
-var EmbeddedFiles embed.FS
+var SQLScripts embed.FS
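The embed.go change above keeps the plugin's default table-creation SQL compiled into the binary via a `//go:embed sqlscripts/*` directive. A sketch of how such an embedded filesystem can be enumerated at startup; it assumes a `sqlscripts/` directory exists next to the source at build time, and the execution step is only described in a comment:

```go
package main

import (
	"embed"
	"fmt"
	"io/fs"
)

// The plugin declares this in embed.go as `var SQLScripts embed.FS`.
//
//go:embed sqlscripts/*
var sqlScripts embed.FS

func main() {
	// Walk every embedded file; the plugin runs scripts like these against
	// ClickHouse when creating its default span-storage tables.
	err := fs.WalkDir(sqlScripts, "sqlscripts", func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		body, readErr := sqlScripts.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		fmt.Printf("would execute %s (%d bytes)\n", path, len(body))
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```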
103 go.mod

@@ -1,13 +1,102 @@
-module github.com/pavolloffay/jaeger-clickhouse
+module github.com/jaegertracing/jaeger-clickhouse

-go 1.16
+go 1.19

 require (
-	github.com/ClickHouse/clickhouse-go v1.4.5
+	github.com/ClickHouse/clickhouse-go/v2 v2.3.0
 	github.com/DATA-DOG/go-sqlmock v1.5.0
 	github.com/ecodia/golang-awaitility v0.0.0-20180710094957-fb55e59708c7
 	github.com/gogo/protobuf v1.3.2
-	github.com/hashicorp/go-hclog v0.16.1
+	github.com/hashicorp/go-hclog v1.3.1
-	github.com/jaegertracing/jaeger v1.24.0
+	github.com/jaegertracing/jaeger v1.38.2-0.20221007043206-b4c88ddf6cdd
 	github.com/opentracing/opentracing-go v1.2.0
-	go.uber.org/zap v1.18.1
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+	github.com/prometheus/client_golang v1.13.0
+	github.com/stretchr/testify v1.8.0
+	github.com/testcontainers/testcontainers-go v0.11.1
+	go.uber.org/zap v1.23.0
+	gopkg.in/yaml.v3 v3.0.1
 )

+require (
+	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
+	github.com/ClickHouse/ch-go v0.47.3 // indirect
+	github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect
+	github.com/Microsoft/hcsshim v0.8.16 // indirect
+	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/benbjohnson/clock v1.3.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect
+	github.com/containerd/containerd v1.5.0-beta.4 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/docker v20.10.7+incompatible // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-units v0.4.0 // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/go-faster/city v1.0.1 // indirect
+	github.com/go-faster/errors v0.6.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
+	github.com/hashicorp/go-plugin v1.4.5 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/klauspost/compress v1.15.10 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/magiconair/properties v1.8.6 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/sys/mount v0.2.0 // indirect
+	github.com/moby/sys/mountinfo v0.4.1 // indirect
+	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
+	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
+	github.com/oklog/run v1.1.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opencontainers/runc v1.0.0-rc93 // indirect
+	github.com/paulmach/orb v0.7.1 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+	github.com/pierrec/lz4/v4 v4.1.15 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.1 // indirect
+	github.com/segmentio/asm v1.2.0 // indirect
+	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/afero v1.8.2 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/cobra v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/spf13/viper v1.13.0 // indirect
+	github.com/subosito/gotenv v1.4.1 // indirect
+	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
+	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
+	go.opencensus.io v0.23.0 // indirect
+	go.opentelemetry.io/otel v1.10.0 // indirect
+	go.opentelemetry.io/otel/trace v1.10.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/net v0.0.0-20221002022538-bcab6841153b // indirect
+	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
+	golang.org/x/text v0.3.7 // indirect
+	google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc // indirect
+	google.golang.org/grpc v1.50.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+)
@@ -0,0 +1,87 @@
# Kubernetes Deployment

This is a guide to deploying Jaeger with Clickhouse storage on Kubernetes.

## Prerequisites

1. Deploy the [Jaeger operator](https://github.com/jaegertracing/jaeger-operator). Note that the `grpc-plugin` storage type is supported since version 1.25.0.
2. Deploy the [Clickhouse operator](https://github.com/Altinity/clickhouse-operator)
3. Deploy [Zookeeper](https://github.com/Altinity/clickhouse-operator/blob/master/docs/replication_setup.md) (if replication is used)

## Deploy

Deploy Clickhouse:

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
  name: jaeger
  labels:
    jaeger-clickhouse: demo
spec:
  configuration:
    clusters:
      - name: cluster1
        layout:
          shardsCount: 1
EOF
```

Create a config map for the Jaeger Clickhouse plugin:

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: jaeger-clickhouse
  labels:
    jaeger-clickhouse: demo
data:
  config.yaml: |
    address: clickhouse-jaeger:9000
    username: clickhouse_operator
    password: clickhouse_operator_password
    spans_table:
    spans_index_table:
    operations_table:
EOF
```

Deploy Jaeger:

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: jaeger-clickhouse
  labels:
    jaeger-clickhouse: demo
spec:
  storage:
    type: grpc-plugin
    grpcPlugin:
      image: ghcr.io/jaegertracing/jaeger-clickhouse:0.7.0
    options:
      grpc-storage-plugin:
        binary: /plugin/jaeger-clickhouse
        configuration-file: /plugin-config/config.yaml
        log-level: debug
  volumeMounts:
    - name: plugin-config
      mountPath: /plugin-config
  volumes:
    - name: plugin-config
      configMap:
        name: jaeger-clickhouse
EOF
```

## Delete all

```bash
kubectl delete jaeger,cm,chi -l jaeger-clickhouse=demo
```
@@ -0,0 +1,78 @@
# Multi-tenant deployment

It may be desirable to share a common ClickHouse instance across multiple Jaeger instances.
There are two ways of doing this, depending on whether spanning the tenants across separate databases is preferable.

## Shared database/tables

If you wish to reuse the same ClickHouse database/tables across all tenants, you can specify a different `tenant: "<name>"` in each jaeger-clickhouse instance config.

When a non-empty `tenant` is specified, all tables will be created with a `tenant` column, and all reads/writes for a given Jaeger instance will be applied against the configured tenant name for that instance.

1. Create a shared database:
   ```sql
   CREATE DATABASE shared ENGINE=Atomic
   ```
2. Configure the per-tenant jaeger-clickhouse clients to specify tenant names:
   ```yaml
   database: shared
   tenant: tenant_1
   ```
   ```yaml
   database: shared
   tenant: tenant_2
   ```

Multitenant mode must be enabled when the deployment is first created and cannot be toggled later, except perhaps by manually adding/removing the `tenant` column from all tables.
Multitenant and singletenant instances must not be mixed within the same database - the two modes are mutually exclusive.

## Separate databases

If you wish to keep instances fully separate, you can configure one ClickHouse database per tenant.
This may be useful when different per-database configuration across tenants is desirable.

1. Create a database for each tenant:
   ```sql
   CREATE DATABASE tenant_1 ENGINE=Atomic;
   CREATE DATABASE tenant_2 ENGINE=Atomic;
   ```
2. Configure the per-tenant jaeger-clickhouse plugins with matching databases:
   ```yaml
   database: tenant_1
   ```
   ```yaml
   database: tenant_2
   ```

## Mixing methods in the same ClickHouse instance

Each of the methods applies on a per-database basis. The methods require different schemas and must not be mixed in a single database, but it is possible to have different databases using different methods in the same ClickHouse instance.

For example, there could be a `shared` database where multiple tenants are sharing the same tables:

```sql
CREATE DATABASE shared ENGINE=Atomic
```
```yaml
database: shared
tenant: tenant_1
```
```yaml
database: shared
tenant: tenant_2
```

Then there could be separate `isolated_x` databases for tenants that should be provided with their own dedicated tables, enabling e.g. better ACL isolation:

```sql
CREATE DATABASE isolated_1 ENGINE=Atomic
CREATE DATABASE isolated_2 ENGINE=Atomic
```
```yaml
database: isolated_1
```
```yaml
database: isolated_2
```
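To make the shared-tables mode concrete: with a non-empty `tenant`, every read the plugin issues is scoped by the tenant column. A hedged Go sketch of what such a tenant-scoped query could look like from the outside; the table and column names follow the shared-tables layout described above but are assumptions here, since the plugin generates its own queries internally:

```go
package main

import (
	"database/sql"
	"fmt"

	// clickhouse-go v2 registers a database/sql driver named "clickhouse".
	_ "github.com/ClickHouse/clickhouse-go/v2"
)

// servicesForTenant lists distinct service names visible to one tenant.
// Illustrative only: "shared.jaeger_index" and the "tenant" column mirror
// the multi-tenant schema sketched in this guide.
func servicesForTenant(db *sql.DB, tenant string) ([]string, error) {
	rows, err := db.Query(
		"SELECT DISTINCT service FROM shared.jaeger_index WHERE tenant = ?", tenant)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var services []string
	for rows.Next() {
		var s string
		if err := rows.Scan(&s); err != nil {
			return nil, err
		}
		services = append(services, s)
	}
	return services, rows.Err()
}

func main() {
	db, err := sql.Open("clickhouse", "clickhouse://localhost:9000/shared")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	services, err := servicesForTenant(db, "tenant_1")
	if err != nil {
		panic(err)
	}
	fmt.Println(services)
}
```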
@ -0,0 +1,208 @@
|
|||
# Sharding and Replication
|
||||
|
||||
This is a guide how to setup sharding and replication for Jaeger data.
|
||||
This guide uses [clickhouse-operator](https://github.com/Altinity/clickhouse-operator) to deploy
|
||||
the storage.
|
||||
|
||||
Note that the Jaeger ClickHouse plugin supports creating replicated schema out-of-the-box. Therefore,
|
||||
this guide is not necessary for setting up default replicated deployment. Also note that the
|
||||
ClickHouse operator uses by default `Ordinary` database engine, which does not work with the
|
||||
embedded replication scripts in Jaeger.
|
||||
Refer to the `config.yaml` how to setup replicated deployment.
|
||||
|
||||
## Sharding
|
||||
|
||||
Sharding is a feature that allows splitting the data into multiple Clickhouse nodes to
|
||||
increase throughput and decrease latency.
|
||||
The sharding feature uses `Distributed` engine that is backed by local tables.
|
||||
The distributed engine is a "virtual" table that does not store any data. It is used as
|
||||
an interface to insert and query data.
|
||||
|
||||
To setup sharding run the following statements on all nodes in the cluster.
|
||||
The "local" tables have to be created on the nodes before the distributed table.
|
||||
|
||||
```sql
|
||||
CREATE DATABASE jaeger ENGINE=Atomic;
|
||||
USE jaeger;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS jaeger_spans AS jaeger_spans_local ENGINE = Distributed('{cluster}', default, jaeger_spans_local, cityHash64(traceID));
|
||||
CREATE TABLE IF NOT EXISTS jaeger_index AS jaeger_index_local ENGINE = Distributed('{cluster}', default, jaeger_index_local, cityHash64(traceID));
|
||||
CREATE TABLE IF NOT EXISTS jaeger_operations AS jaeger_operations_local ENGINE = Distributed('{cluster}', default, jaeger_operations_local, rand());
|
||||
```
|
||||
|
||||
* The `AS <table-name>` statement creates table with the same schema as the specified one.
|
||||
* The `Distributed` engine takes as parameters cluster , database, table name and sharding key.
|
||||
|
||||
If the distributed table is not created on all Clickhouse nodes the Jaeger query fails to get the data from the storage.
|
||||
|
||||
### Deploy Clickhouse
|
||||
|
||||
Deploy Clickhouse with 2 shards:
|
||||
|
||||
```yaml
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: clickhouse.altinity.com/v1
|
||||
kind: ClickHouseInstallation
|
||||
metadata:
|
||||
name: jaeger
|
||||
spec:
|
||||
configuration:
|
||||
clusters:
|
||||
- name: cluster1
|
||||
layout:
|
||||
shardsCount: 2
|
||||
EOF
|
||||
```
|
||||
|
||||
Use the following command to run `clickhouse-client` on Clickhouse nodes and create the distributed tables:
|
||||
```bash
|
||||
kubectl exec -it statefulset.apps/chi-jaeger-cluster1-0-0 -- clickhouse-client
|
||||
```
|
||||
|
||||
### Plugin configuration
|
||||
|
||||
The plugin has to be configured to write and read that from the global tables:
|
||||
|
||||
```yaml
|
||||
address: clickhouse-jaeger:9000
|
||||
# database: jaeger
|
||||
spans_table: jaeger_spans
|
||||
spans_index_table: jaeger_index
|
||||
operations_table: jaeger_operations
|
||||
```
|
||||
|
||||
## Replication
|
||||
|
||||
Replication as the name suggest automatically replicates the data across multiple Clickhouse nodes.
|
||||
It is used to accomplish high availability, load scaling and migration/updates.
|
||||
|
||||
The replication uses Zookeeper. Refer to the Clickhouse operator how to deploy Zookeeper.
|
||||
|
||||
Zookeeper allows us to use `ON CLUSTER` to automatically replicate table creation on all nodes.
|
||||
Therefore the following command can be run only on a single Clickhouse node:
|
||||
|
||||
```sql
|
||||
CREATE DATABASE IF NOT EXISTS jaeger ON CLUSTER '{cluster}' ENGINE=Atomic;
|
||||
USE jaeger;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS jaeger_spans_local ON CLUSTER '{cluster}' (
|
||||
timestamp DateTime CODEC(Delta, ZSTD(1)),
|
||||
traceID String CODEC(ZSTD(1)),
|
||||
model String CODEC(ZSTD(3))
|
||||
) ENGINE ReplicatedMergeTree
|
||||
PARTITION BY toDate(timestamp)
|
||||
ORDER BY traceID
|
||||
SETTINGS index_granularity=1024;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS jaeger_index_local ON CLUSTER '{cluster}' (
|
||||
timestamp DateTime CODEC(Delta, ZSTD(1)),
|
||||
traceID String CODEC(ZSTD(1)),
|
||||
service LowCardinality(String) CODEC(ZSTD(1)),
|
||||
operation LowCardinality(String) CODEC(ZSTD(1)),
|
||||
durationUs UInt64 CODEC(ZSTD(1)),
|
||||
tags Array(String) CODEC(ZSTD(1)),
|
||||
INDEX idx_tags tags TYPE bloom_filter(0.01) GRANULARITY 64,
|
||||
INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
|
||||
) ENGINE ReplicatedMergeTree
|
||||
PARTITION BY toDate(timestamp)
|
||||
ORDER BY (service, -toUnixTimestamp(timestamp))
|
||||
SETTINGS index_granularity=1024;
|
||||
|
||||
CREATE MATERIALIZED VIEW IF NOT EXISTS jaeger_operations_local ON CLUSTER '{cluster}'
|
||||
ENGINE ReplicatedMergeTree
|
||||
PARTITION BY toYYYYMM(date) ORDER BY (date, service, operation)
|
||||
SETTINGS index_granularity=32
|
||||
POPULATE
|
||||
AS SELECT
|
||||
toDate(timestamp) AS date,
|
||||
service,
|
||||
operation,
|
||||
count() as count
|
||||
FROM jaeger.jaeger_index_local
|
||||
GROUP BY date, service, operation;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS jaeger_spans ON CLUSTER '{cluster}' AS jaeger.jaeger_spans_local ENGINE = Distributed('{cluster}', jaeger, jaeger_spans_local, cityHash64(traceID));
|
||||
CREATE TABLE IF NOT EXISTS jaeger_index ON CLUSTER '{cluster}' AS jaeger.jaeger_index_local ENGINE = Distributed('{cluster}', jaeger, jaeger_index_local, cityHash64(traceID));
|
||||
CREATE TABLE IF NOT EXISTS jaeger_operations on CLUSTER '{cluster}' AS jaeger.jaeger_operations_local ENGINE = Distributed('{cluster}', jaeger, jaeger_operations_local, rand());
|
||||
```
|
||||
|
||||
### Deploy Clickhouse
|
||||
|
||||
Before deploying Clickhouse make sure Zookeeper is running in `zoo1ns` namespace.
|
||||
|
||||
Deploy Clickhouse with 3 shards and 2 replicas. In total Clickhouse operator will deploy 6 pods:
|
||||
|
||||
```yaml
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: clickhouse.altinity.com/v1
|
||||
kind: ClickHouseInstallation
|
||||
metadata:
|
||||
name: jaeger
|
||||
spec:
|
||||
defaults:
|
||||
templates:
|
||||
dataVolumeClaimTemplate: data-volume-template
|
||||
logVolumeClaimTemplate: log-volume-template
|
||||
configuration:
|
||||
zookeeper:
|
||||
nodes:
|
||||
- host: zookeeper.zoo1ns
|
||||
clusters:
|
||||
- name: cluster1
|
||||
layout:
|
||||
shardsCount: 3
|
||||
replicasCount: 2
|
||||
templates:
|
||||
volumeClaimTemplates:
|
||||
- name: data-volume-template
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
- name: log-volume-template
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
EOF
|
||||
```
|
||||
|
||||
The Clickhouse deployment will look like this:
|
||||
```bash
|
||||
k get statefulsets
|
||||
NAME READY AGE
|
||||
chi-jaeger-cluster1-0-0 1/1 17m # shard 0
|
||||
chi-jaeger-cluster1-0-1 1/1 17m # shard 0, replica 1
|
||||
chi-jaeger-cluster1-1-0 1/1 16m # shard 1
|
||||
chi-jaeger-cluster1-1-1 1/1 16m # shard 1, replica 1
|
||||
chi-jaeger-cluster1-2-0 1/1 7m43s # shard 2
|
||||
chi-jaeger-cluster1-2-1 1/1 7m26s # shard 2, replica 1
|
||||
```

#### Scaling up

Just increase the `shardsCount` number and a new ClickHouse node will come up with the Jaeger tables already initialized, so no other steps are required. Note that old data is not re-balanced; only new writes take the new node into account.
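
For example, going from 3 to 4 shards is a one-line change in the layout of the `ClickHouseInstallation` shown above (sketch of the relevant fragment):

```yaml
clusters:
  - name: cluster1
    layout:
      shardsCount: 4   # was 3; the operator brings up pods for the new shard
      replicasCount: 2
```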

## Useful Commands

### SQL

```sql
show tables;
select count() from jaeger_spans;
```

### Kubectl

```bash
kubectl get chi -o wide
kubectl port-forward service/clickhouse-jaeger 9000:9000
kubectl delete chi jaeger
```
|
|
@ -0,0 +1,3 @@
|
|||
address: localhost:9000
|
||||
init_sql_scripts_dir: init_sql_scripts
|
||||
init_tables: true
|
|
@ -0,0 +1,107 @@
|
|||
// Copyright (c) 2019 The Jaeger Authors.
|
||||
// Copyright (c) 2018 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/jaegertracing/jaeger/pkg/config"
|
||||
"github.com/jaegertracing/jaeger/pkg/metrics"
|
||||
"github.com/jaegertracing/jaeger/pkg/testutils"
|
||||
"github.com/jaegertracing/jaeger/plugin/storage/grpc"
|
||||
"github.com/jaegertracing/jaeger/plugin/storage/integration"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const defaultPluginBinaryPath = "../../../examples/memstore-plugin/memstore-plugin"
|
||||
|
||||
type GRPCStorageIntegrationTestSuite struct {
|
||||
integration.StorageIntegration
|
||||
logger *zap.Logger
|
||||
pluginBinaryPath string
|
||||
pluginConfigPath string
|
||||
}
|
||||
|
||||
func (s *GRPCStorageIntegrationTestSuite) initialize() error {
|
||||
s.logger, _ = testutils.NewLogger()
|
||||
|
||||
f := grpc.NewFactory()
|
||||
v, command := config.Viperize(f.AddFlags)
|
||||
flags := []string{
|
||||
"--grpc-storage-plugin.binary",
|
||||
s.pluginBinaryPath,
|
||||
"--grpc-storage-plugin.log-level",
|
||||
"debug",
|
||||
}
|
||||
if s.pluginConfigPath != "" {
|
||||
flags = append(flags,
|
||||
"--grpc-storage-plugin.configuration-file",
|
||||
s.pluginConfigPath,
|
||||
)
|
||||
}
|
||||
err := command.ParseFlags(flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.InitFromViper(v, zap.NewNop())
|
||||
if err = f.Initialize(metrics.NullFactory, s.logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.SpanWriter, err = f.CreateSpanWriter(); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.SpanReader, err = f.CreateSpanReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO DependencyWriter is not implemented in grpc store
|
||||
|
||||
s.Refresh = s.refresh
|
||||
s.CleanUp = s.cleanUp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *GRPCStorageIntegrationTestSuite) refresh() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *GRPCStorageIntegrationTestSuite) cleanUp() error {
|
||||
return s.initialize()
|
||||
}
|
||||
|
||||
func TestGRPCStorage(t *testing.T) {
|
||||
if os.Getenv("STORAGE") != "grpc-plugin" {
|
||||
t.Skip("Integration test against grpc skipped; set STORAGE env var to grpc-plugin to run this")
|
||||
}
|
||||
binaryPath := os.Getenv("PLUGIN_BINARY_PATH")
|
||||
if binaryPath == "" {
|
||||
t.Logf("PLUGIN_BINARY_PATH env var not set, using %s", defaultPluginBinaryPath)
|
||||
binaryPath = defaultPluginBinaryPath
|
||||
}
|
||||
configPath := os.Getenv("PLUGIN_CONFIG_PATH")
|
||||
if configPath == "" {
|
||||
t.Log("PLUGIN_CONFIG_PATH env var not set")
|
||||
}
|
||||
s := &GRPCStorageIntegrationTestSuite{
|
||||
pluginBinaryPath: binaryPath,
|
||||
pluginConfigPath: configPath,
|
||||
}
|
||||
require.NoError(t, s.initialize())
|
||||
s.IntegrationTestAll(t)
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
DROP DATABASE IF EXISTS default;
|
|
@ -0,0 +1 @@
|
|||
CREATE DATABASE IF NOT EXISTS default;
|
|
@ -1,5 +1,145 @@
|
|||
module github.com/pavolloffay/jaeger-clickhouse/internal/tools
|
||||
module github.com/jaegertracing/jaeger-clickhouse/internal/tools
|
||||
|
||||
go 1.16
|
||||
go 1.19
|
||||
|
||||
require golang.org/x/tools v0.1.5
|
||||
require (
|
||||
github.com/golangci/golangci-lint v1.41.1
|
||||
golang.org/x/tools v0.1.5
|
||||
)
|
||||
|
||||
require (
|
||||
4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/OpenPeeDeeP/depguard v1.0.1 // indirect
|
||||
github.com/alexkohler/prealloc v1.0.0 // indirect
|
||||
github.com/ashanbrown/forbidigo v1.2.0 // indirect
|
||||
github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bkielbasa/cyclop v1.2.0 // indirect
|
||||
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/charithe/durationcheck v0.0.8 // indirect
|
||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
|
||||
github.com/daixiang0/gci v0.2.8 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/denis-tingajkin/go-header v0.4.2 // indirect
|
||||
github.com/esimonov/ifshort v1.0.2 // indirect
|
||||
github.com/ettle/strcase v0.1.1 // indirect
|
||||
github.com/fatih/color v1.12.0 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/fzipp/gocyclo v0.3.1 // indirect
|
||||
github.com/go-critic/go-critic v0.5.6 // indirect
|
||||
github.com/go-toolsmith/astcast v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astcopy v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astequal v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astfmt v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astp v1.0.0 // indirect
|
||||
github.com/go-toolsmith/strparse v1.0.0 // indirect
|
||||
github.com/go-toolsmith/typep v1.0.2 // indirect
|
||||
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gofrs/flock v0.8.0 // indirect
|
||||
github.com/golang/protobuf v1.4.3 // indirect
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
|
||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
|
||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
|
||||
github.com/golangci/misspell v0.3.5 // indirect
|
||||
github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 // indirect
|
||||
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
|
||||
github.com/google/go-cmp v0.5.4 // indirect
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 // indirect
|
||||
github.com/gostaticanalysis/analysisutil v0.4.1 // indirect
|
||||
github.com/gostaticanalysis/comment v1.4.1 // indirect
|
||||
github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 // indirect
|
||||
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jgautheron/goconst v1.5.1 // indirect
|
||||
github.com/jingyugao/rowserrcheck v1.1.0 // indirect
|
||||
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
|
||||
github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d // indirect
|
||||
github.com/kisielk/errcheck v1.6.0 // indirect
|
||||
github.com/kisielk/gotool v1.0.0 // indirect
|
||||
github.com/kulti/thelper v0.4.0 // indirect
|
||||
github.com/kunwardeep/paralleltest v1.0.2 // indirect
|
||||
github.com/kyoh86/exportloopref v0.1.8 // indirect
|
||||
github.com/ldez/gomoddirectives v0.2.1 // indirect
|
||||
github.com/ldez/tagliatelle v0.2.0 // indirect
|
||||
github.com/magiconair/properties v1.8.1 // indirect
|
||||
github.com/maratori/testpackage v1.0.1 // indirect
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
|
||||
github.com/mattn/go-colorable v0.1.8 // indirect
|
||||
github.com/mattn/go-isatty v0.0.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
|
||||
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect
|
||||
github.com/mgechev/revive v1.0.7 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
||||
github.com/moricho/tparallel v0.2.1 // indirect
|
||||
github.com/nakabonne/nestif v0.3.0 // indirect
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
|
||||
github.com/nishanths/exhaustive v0.1.0 // indirect
|
||||
github.com/nishanths/predeclared v0.2.1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/pelletier/go-toml v1.2.0 // indirect
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea // indirect
|
||||
github.com/prometheus/client_golang v1.7.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.10.0 // indirect
|
||||
github.com/prometheus/procfs v0.1.3 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.3.4 // indirect
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
|
||||
github.com/ryancurrah/gomodguard v1.2.2 // indirect
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
|
||||
github.com/securego/gosec/v2 v2.8.0 // indirect
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/sonatard/noctx v0.0.1 // indirect
|
||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
||||
github.com/spf13/afero v1.1.2 // indirect
|
||||
github.com/spf13/cast v1.3.0 // indirect
|
||||
github.com/spf13/cobra v1.1.3 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.7.1 // indirect
|
||||
github.com/ssgreg/nlreturn/v2 v2.1.0 // indirect
|
||||
github.com/stretchr/objx v0.1.1 // indirect
|
||||
github.com/stretchr/testify v1.7.0 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b // indirect
|
||||
github.com/tetafro/godot v1.4.7 // indirect
|
||||
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 // indirect
|
||||
github.com/tomarrell/wrapcheck/v2 v2.1.0 // indirect
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.4.0 // indirect
|
||||
github.com/ultraware/funlen v0.0.3 // indirect
|
||||
github.com/ultraware/whitespace v0.0.4 // indirect
|
||||
github.com/uudashr/gocognit v1.0.1 // indirect
|
||||
github.com/yeya24/promlinter v0.1.0 // indirect
|
||||
golang.org/x/mod v0.4.2 // indirect
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
|
||||
golang.org/x/text v0.3.5 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/protobuf v1.25.0 // indirect
|
||||
gopkg.in/ini.v1 v1.51.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
honnef.co/go/tools v0.2.0 // indirect
|
||||
mvdan.cc/gofumpt v0.1.1 // indirect
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
|
||||
mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 // indirect
|
||||
)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package tools
|
||||
|
||||
import (
|
||||
_ "github.com/golangci/golangci-lint/cmd/golangci-lint"
|
||||
_ "golang.org/x/tools/cmd/goimports"
|
||||
)
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"dependencies": {
|
||||
"dagMaxNumServices": 200,
|
||||
"menuEnabled": true
|
||||
},
|
||||
"archiveEnabled": true
|
||||
}
|
||||
|
(image added, 127 KiB)
|
@ -1,13 +0,0 @@
|
|||
CREATE TABLE IF NOT EXISTS jaeger_index_v2 (
|
||||
timestamp DateTime CODEC(Delta, ZSTD(1)),
|
||||
traceID String CODEC(ZSTD(1)),
|
||||
service LowCardinality(String) CODEC(ZSTD(1)),
|
||||
operation LowCardinality(String) CODEC(ZSTD(1)),
|
||||
durationUs UInt64 CODEC(ZSTD(1)),
|
||||
tags Array(String) CODEC(ZSTD(1)),
|
||||
INDEX idx_tags tags TYPE bloom_filter(0.01) GRANULARITY 64,
|
||||
INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
|
||||
) ENGINE MergeTree()
|
||||
PARTITION BY toDate(timestamp)
|
||||
ORDER BY (service, -toUnixTimestamp(timestamp))
|
||||
SETTINGS index_granularity=1024
|
|
@ -1,8 +0,0 @@
|
|||
CREATE TABLE IF NOT EXISTS jaeger_spans_v2 (
|
||||
timestamp DateTime CODEC(Delta, ZSTD(1)),
|
||||
traceID String CODEC(ZSTD(1)),
|
||||
model String CODEC(ZSTD(3))
|
||||
) ENGINE MergeTree()
|
||||
PARTITION BY toDate(timestamp)
|
||||
ORDER BY traceID
|
||||
SETTINGS index_granularity=1024
|
|
@ -1,12 +0,0 @@
|
|||
CREATE MATERIALIZED VIEW IF NOT EXISTS jaeger_operations_v2
|
||||
ENGINE SummingMergeTree
|
||||
PARTITION BY toYYYYMM(date) ORDER BY (date, service, operation)
|
||||
SETTINGS index_granularity=32
|
||||
POPULATE
|
||||
AS SELECT
|
||||
toDate(timestamp) AS date,
|
||||
service,
|
||||
operation,
|
||||
count() as count
|
||||
FROM jaeger_index_v2
|
||||
GROUP BY date, service, operation
|
|
@ -0,0 +1,3 @@
|
|||
CREATE TABLE IF NOT EXISTS {{.Table}}
|
||||
ON CLUSTER '{cluster}' AS {{.Database}}.{{.Table}}_local
|
||||
ENGINE = Distributed('{cluster}', {{.Database}}, {{.Table}}_local, {{.Hash}})
|
|
@ -0,0 +1,28 @@
|
|||
CREATE TABLE IF NOT EXISTS {{.SpansIndexTable}}
|
||||
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
|
||||
(
|
||||
{{if .Multitenant -}}
|
||||
tenant LowCardinality(String) CODEC (ZSTD(1)),
|
||||
{{- end -}}
|
||||
timestamp DateTime CODEC (Delta, ZSTD(1)),
|
||||
traceID String CODEC (ZSTD(1)),
|
||||
service LowCardinality(String) CODEC (ZSTD(1)),
|
||||
operation LowCardinality(String) CODEC (ZSTD(1)),
|
||||
durationUs UInt64 CODEC (ZSTD(1)),
|
||||
tags Nested
|
||||
(
|
||||
key LowCardinality(String),
|
||||
value String
|
||||
) CODEC (ZSTD(1)),
|
||||
INDEX idx_tag_keys tags.key TYPE bloom_filter(0.01) GRANULARITY 64,
|
||||
INDEX idx_duration durationUs TYPE minmax GRANULARITY 1
|
||||
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
|
||||
{{.TTLTimestamp}}
|
||||
PARTITION BY (
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
toDate(timestamp)
|
||||
)
|
||||
ORDER BY (service, -toUnixTimestamp(timestamp))
|
||||
SETTINGS index_granularity = 1024
|
|
@ -0,0 +1,43 @@
|
|||
CREATE MATERIALIZED VIEW IF NOT EXISTS {{.OperationsTable}}
|
||||
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
|
||||
ENGINE {{if .Replication}}ReplicatedSummingMergeTree{{else}}SummingMergeTree{{end}}
|
||||
{{.TTLDate}}
|
||||
PARTITION BY (
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
toYYYYMM(date)
|
||||
)
|
||||
ORDER BY (
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
date,
|
||||
service,
|
||||
operation
|
||||
)
|
||||
SETTINGS index_granularity = 32
|
||||
POPULATE
|
||||
AS SELECT
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
toDate(timestamp) AS date,
|
||||
service,
|
||||
operation,
|
||||
count() AS count,
|
||||
if(
|
||||
has(tags.key, 'span.kind'),
|
||||
tags.value[indexOf(tags.key, 'span.kind')],
|
||||
''
|
||||
) AS spankind
|
||||
FROM {{.Database}}.{{.SpansIndexTable}}
|
||||
GROUP BY
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
date,
|
||||
service,
|
||||
operation,
|
||||
tags.key,
|
||||
tags.value
|
|
@ -0,0 +1,19 @@
|
|||
CREATE TABLE IF NOT EXISTS {{.SpansArchiveTable}}
|
||||
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
|
||||
(
|
||||
{{if .Multitenant -}}
|
||||
tenant LowCardinality(String) CODEC (ZSTD(1)),
|
||||
{{- end -}}
|
||||
timestamp DateTime CODEC (Delta, ZSTD(1)),
|
||||
traceID String CODEC (ZSTD(1)),
|
||||
model String CODEC (ZSTD(3))
|
||||
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
|
||||
{{.TTLTimestamp}}
|
||||
PARTITION BY (
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
toYYYYMM(timestamp)
|
||||
)
|
||||
ORDER BY traceID
|
||||
SETTINGS index_granularity = 1024
|
|
@ -0,0 +1,19 @@
|
|||
CREATE TABLE IF NOT EXISTS {{.SpansTable}}
|
||||
{{if .Replication}}ON CLUSTER '{cluster}'{{end}}
|
||||
(
|
||||
{{if .Multitenant -}}
|
||||
tenant LowCardinality(String) CODEC (ZSTD(1)),
|
||||
{{- end -}}
|
||||
timestamp DateTime CODEC (Delta, ZSTD(1)),
|
||||
traceID String CODEC (ZSTD(1)),
|
||||
model String CODEC (ZSTD(3))
|
||||
) ENGINE {{if .Replication}}ReplicatedMergeTree{{else}}MergeTree(){{end}}
|
||||
{{.TTLTimestamp}}
|
||||
PARTITION BY (
|
||||
{{if .Multitenant -}}
|
||||
tenant,
|
||||
{{- end -}}
|
||||
toDate(timestamp)
|
||||
)
|
||||
ORDER BY traceID
|
||||
SETTINGS index_granularity = 1024
|
|
@ -0,0 +1,18 @@
|
|||
package clickhousedependencystore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDependencyStore_GetDependencies(t *testing.T) {
|
||||
dependencyStore := NewDependencyStore()
|
||||
|
||||
dependencies, err := dependencyStore.GetDependencies(context.Background(), time.Now(), time.Hour)
|
||||
|
||||
assert.EqualError(t, err, errNotImplemented.Error())
|
||||
assert.Nil(t, dependencies)
|
||||
}
|
|
@ -0,0 +1,85 @@
|
|||
package clickhousespanstore
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_ heap.Interface = workerHeap{}
|
||||
errWorkerNotFound = fmt.Errorf("worker not found in heap")
|
||||
)
|
||||
|
||||
type heapItem struct {
|
||||
pushTime time.Time
|
||||
worker *WriteWorker
|
||||
}
|
||||
|
||||
// workerHeap is a heap of WriteWorkers, ordered by each worker's push time.
|
||||
type workerHeap struct {
|
||||
elems *[]*heapItem
|
||||
indexes map[*WriteWorker]int
|
||||
}
|
||||
|
||||
func newWorkerHeap(cap int) workerHeap {
|
||||
elems := make([]*heapItem, 0, cap)
|
||||
return workerHeap{
|
||||
elems: &elems,
|
||||
indexes: make(map[*WriteWorker]int),
|
||||
}
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) AddWorker(worker *WriteWorker) {
|
||||
heap.Push(workerHeap, heapItem{
|
||||
worker: worker,
|
||||
pushTime: time.Now(),
|
||||
})
|
||||
}
|
||||
|
||||
func (workerHeap *workerHeap) RemoveWorker(worker *WriteWorker) error {
|
||||
idx, ok := workerHeap.indexes[worker]
|
||||
if !ok {
|
||||
return errWorkerNotFound
|
||||
}
|
||||
heap.Remove(workerHeap, idx)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (workerHeap *workerHeap) CloseWorkers() {
|
||||
for _, item := range *workerHeap.elems {
|
||||
item.worker.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) Len() int {
|
||||
return len(*workerHeap.elems)
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) Less(i, j int) bool {
|
||||
return (*workerHeap.elems)[i].pushTime.Before((*workerHeap.elems)[j].pushTime)
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) Swap(i, j int) {
|
||||
(*workerHeap.elems)[i], (*workerHeap.elems)[j] = (*workerHeap.elems)[j], (*workerHeap.elems)[i]
|
||||
workerHeap.indexes[(*workerHeap.elems)[i].worker] = i
|
||||
workerHeap.indexes[(*workerHeap.elems)[j].worker] = j
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) Push(x interface{}) {
|
||||
switch t := x.(type) {
|
||||
case heapItem:
|
||||
*workerHeap.elems = append(*workerHeap.elems, &t)
|
||||
workerHeap.indexes[t.worker] = len(*workerHeap.elems) - 1
|
||||
default:
|
||||
panic("Unknown type")
|
||||
}
|
||||
}
|
||||
|
||||
func (workerHeap workerHeap) Pop() interface{} {
|
||||
lastInd := len(*workerHeap.elems) - 1
|
||||
last := (*workerHeap.elems)[lastInd]
|
||||
delete(workerHeap.indexes, last.worker)
|
||||
*workerHeap.elems = (*workerHeap.elems)[:lastInd]
|
||||
return last.worker
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
)
|
||||
|
||||
var _ driver.ValueConverter = ConverterMock{}
|
||||
|
||||
type ConverterMock struct{}
|
||||
|
||||
func (conv ConverterMock) ConvertValue(v interface{}) (driver.Value, error) {
|
||||
switch t := v.(type) {
|
||||
case model.TraceID:
|
||||
return driver.Value(t.String()), nil
|
||||
case time.Time:
|
||||
return driver.Value(t), nil
|
||||
case time.Duration:
|
||||
return driver.Value(t.Nanoseconds()), nil
|
||||
case model.SpanID:
|
||||
return driver.Value(t), nil
|
||||
case string:
|
||||
return driver.Value(t), nil
|
||||
case []uint8:
|
||||
return driver.Value(t), nil
|
||||
case int64:
|
||||
return driver.Value(t), nil
|
||||
case uint64:
|
||||
return driver.Value(t), nil
|
||||
case int:
|
||||
return driver.Value(t), nil
|
||||
case []string:
|
||||
return driver.Value(fmt.Sprint(t)), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown type %T", t)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConverterMock_ConvertValue(t *testing.T) {
|
||||
converter := ConverterMock{}
|
||||
|
||||
testCases := map[string]struct {
|
||||
valueToConvert interface{}
|
||||
expectedResult driver.Value
|
||||
}{
|
||||
"string value": {valueToConvert: "some string value", expectedResult: driver.Value("some string value")},
|
||||
"string slice value": {valueToConvert: []string{"some", "slice", "of", "strings"}, expectedResult: driver.Value("[some slice of strings]")},
|
||||
"time value": {
|
||||
valueToConvert: time.Date(2002, time.February, 19, 14, 43, 51, 0, time.UTC),
|
||||
expectedResult: driver.Value(time.Date(2002, time.February, 19, 14, 43, 51, 0, time.UTC)),
|
||||
},
|
||||
"duration value": {
|
||||
valueToConvert: time.Unix(12340, 123456789).Sub(time.Unix(0, 0)),
|
||||
expectedResult: driver.Value(int64(12340123456789)),
|
||||
},
|
||||
"int64 value": {valueToConvert: int64(1823), expectedResult: driver.Value(int64(1823))},
|
||||
"int value": {valueToConvert: 1823, expectedResult: driver.Value(1823)},
|
||||
"model.SpanID value": {valueToConvert: model.SpanID(318148), expectedResult: driver.Value(model.SpanID(318148))},
|
||||
"model.TraceID value": {valueToConvert: model.TraceID{Low: 0xabd5, High: 0xa31}, expectedResult: driver.Value("0000000000000a31000000000000abd5")},
|
||||
"uint8 slice value": {valueToConvert: []uint8("asdkja"), expectedResult: driver.Value([]uint8{0x61, 0x73, 0x64, 0x6b, 0x6a, 0x61})},
|
||||
}
|
||||
|
||||
for name, test := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
converted, err := converter.ConvertValue(test.valueToConvert)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, test.expectedResult, converted)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConverterMock_Fail(t *testing.T) {
|
||||
converter := ConverterMock{}
|
||||
|
||||
tests := map[string]struct {
|
||||
valueToConvert interface{}
|
||||
expectedErrorMsg string
|
||||
}{
|
||||
"float64 value": {valueToConvert: float64(1e-4), expectedErrorMsg: "unknown type float64"},
|
||||
"int32 value": {valueToConvert: int32(12831), expectedErrorMsg: "unknown type int32"},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
val, err := converter.ConvertValue(test.valueToConvert)
|
||||
assert.Equal(t, nil, val)
|
||||
assert.EqualError(t, err, test.expectedErrorMsg)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
sqlmock "github.com/DATA-DOG/go-sqlmock"
|
||||
)
|
||||
|
||||
func GetDbMock() (*sql.DB, sqlmock.Sqlmock, error) {
|
||||
return sqlmock.New(
|
||||
sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
|
||||
sqlmock.ValueConverterOption(ConverterMock{}),
|
||||
)
|
||||
}
|
|
@ -0,0 +1,115 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const levelCount = 5
|
||||
|
||||
var _ hclog.Logger = SpyLogger{}
|
||||
|
||||
type LogMock struct {
|
||||
Msg string
|
||||
Args []interface{}
|
||||
}
|
||||
|
||||
type SpyLogger struct {
|
||||
logs [][]LogMock
|
||||
}
|
||||
|
||||
func NewSpyLogger() SpyLogger {
|
||||
return SpyLogger{logs: make([][]LogMock, levelCount)}
|
||||
}
|
||||
|
||||
func (logger *SpyLogger) AssertLogsOfLevelEqual(t *testing.T, level hclog.Level, want []LogMock) {
|
||||
assert.Equal(t, want, logger.getLogs(level))
|
||||
}
|
||||
|
||||
func (logger *SpyLogger) getLogs(level hclog.Level) []LogMock {
|
||||
return logger.logs[level-1]
|
||||
}
|
||||
|
||||
func (logger *SpyLogger) AssertLogsEmpty(t *testing.T) {
|
||||
assert.Equal(t, logger.logs, make([][]LogMock, levelCount))
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Log(level hclog.Level, msg string, args ...interface{}) {
|
||||
logger.logs[level-1] = append(logger.getLogs(level), LogMock{msg, args})
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Trace(msg string, args ...interface{}) {
|
||||
logger.Log(hclog.Trace, msg, args...)
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Debug(msg string, args ...interface{}) {
|
||||
logger.Log(hclog.Debug, msg, args...)
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Info(msg string, args ...interface{}) {
|
||||
logger.Log(hclog.Info, msg, args...)
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Warn(msg string, args ...interface{}) {
|
||||
logger.Log(hclog.Warn, msg, args...)
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Error(msg string, args ...interface{}) {
|
||||
logger.Log(hclog.Error, msg, args...)
|
||||
}
|
||||
|
||||
func (logger SpyLogger) IsTrace() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) IsDebug() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) IsInfo() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) IsWarn() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) IsError() bool {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) ImpliedArgs() []interface{} {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) With(args ...interface{}) hclog.Logger {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Name() string {
|
||||
return "spy logger"
|
||||
}
|
||||
|
||||
func (logger SpyLogger) Named(name string) hclog.Logger {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) ResetNamed(name string) hclog.Logger {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) SetLevel(level hclog.Level) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (logger SpyLogger) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer {
|
||||
panic("implement me")
|
||||
}
|
|
@ -0,0 +1,170 @@
|
|||
package mocks
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
maxLogCount = 80
|
||||
maxArgCount = 10
|
||||
)
|
||||
|
||||
func TestSpyLogger_AssertLogsEmpty(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logger.AssertLogsEmpty(t)
|
||||
}
|
||||
|
||||
func TestSpyLogger_AssertLogsOfLevelEqualNoArgs(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
var logs = make([][]LogMock, levelCount)
|
||||
for level, levelLogs := range logs {
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
levelLogs = append(levelLogs, LogMock{Msg: msg})
|
||||
logger.Log(hclog.Level(level+1), msg)
|
||||
}
|
||||
logs[level] = levelLogs
|
||||
}
|
||||
|
||||
for level, levelLogs := range logs {
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Level(level+1), levelLogs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSpyLogger_AssertLogsOfLevelEqualArgs(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
var logs = make([][]LogMock, levelCount)
|
||||
for level, levelLogs := range logs {
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
levelLogs = append(levelLogs, LogMock{Msg: msg, Args: args})
|
||||
logger.Log(hclog.Level(level+1), msg, args...)
|
||||
}
|
||||
logs[level] = levelLogs
|
||||
}
|
||||
|
||||
for level, levelLogs := range logs {
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Level(level+1), levelLogs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSpyLogger_Trace(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
logs := make([]LogMock, 0, logsCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
logs = append(logs, LogMock{Msg: msg, Args: args})
|
||||
logger.Trace(msg, args...)
|
||||
}
|
||||
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Trace, logs)
|
||||
}
|
||||
|
||||
func TestSpyLogger_Debug(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
logs := make([]LogMock, 0, logsCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
logs = append(logs, LogMock{Msg: msg, Args: args})
|
||||
logger.Debug(msg, args...)
|
||||
}
|
||||
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Debug, logs)
|
||||
}
|
||||
|
||||
func TestSpyLogger_Info(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
logs := make([]LogMock, 0, logsCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
logs = append(logs, LogMock{Msg: msg, Args: args})
|
||||
logger.Info(msg, args...)
|
||||
}
|
||||
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Info, logs)
|
||||
}
|
||||
|
||||
func TestSpyLogger_Warn(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
logs := make([]LogMock, 0, logsCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
logs = append(logs, LogMock{Msg: msg, Args: args})
|
||||
logger.Warn(msg, args...)
|
||||
}
|
||||
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Warn, logs)
|
||||
}
|
||||
|
||||
func TestSpyLogger_Error(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
logsCount := rand.Intn(maxLogCount)
|
||||
logs := make([]LogMock, 0, logsCount)
|
||||
for i := 0; i < logsCount; i++ {
|
||||
msg := "msg" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
args := generateArgs(rand.Intn(maxArgCount))
|
||||
logs = append(logs, LogMock{Msg: msg, Args: args})
|
||||
logger.Error(msg, args...)
|
||||
}
|
||||
|
||||
logger.AssertLogsOfLevelEqual(t, hclog.Error, logs)
|
||||
}
|
||||
|
||||
func TestSpyLogger_Name(t *testing.T) {
|
||||
assert.Equal(t, "spy logger", NewSpyLogger().Name())
|
||||
}
|
||||
|
||||
func TestNotImplemented(t *testing.T) {
|
||||
logger := NewSpyLogger()
|
||||
|
||||
tests := map[string]struct {
|
||||
function assert.PanicTestFunc
|
||||
}{
|
||||
"is_trace": {function: func() { _ = logger.IsTrace() }},
|
||||
"is_debug": {function: func() { _ = logger.IsDebug() }},
|
||||
"is_info": {function: func() { _ = logger.IsInfo() }},
|
||||
"is_warn": {function: func() { _ = logger.IsWarn() }},
|
||||
"is_error": {function: func() { _ = logger.IsError() }},
|
||||
"implied_args": {function: func() { _ = logger.ImpliedArgs() }},
|
||||
"with": {function: func() { _ = logger.With() }},
|
||||
"named": {function: func() { _ = logger.Named("") }},
|
||||
"reset_named": {function: func() { _ = logger.ResetNamed("") }},
|
||||
"set_level": {function: func() { logger.SetLevel(hclog.NoLevel) }},
|
||||
"standard_logger": {function: func() { _ = logger.StandardLogger(nil) }},
|
||||
"standard_writer": {function: func() { _ = logger.StandardWriter(nil) }},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert.Panics(t, test.function, "implement me")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func generateArgs(count int) []interface{} {
|
||||
args := make([]interface{}, 0, 2*count)
|
||||
for j := 0; j < count; j++ {
|
||||
args = append(
|
||||
args,
|
||||
"key"+strconv.FormatUint(rand.Uint64(), 10),
|
||||
"value"+strconv.FormatUint(rand.Uint64(), 10),
|
||||
)
|
||||
}
|
||||
return args
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package clickhousespanstore
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
)
|
||||
|
||||
// WorkerParams contains parameters that are shared between WriteWorkers
|
||||
type WorkerParams struct {
|
||||
logger hclog.Logger
|
||||
db *sql.DB
|
||||
indexTable TableName
|
||||
spansTable TableName
|
||||
tenant string
|
||||
encoding Encoding
|
||||
delay time.Duration
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
package clickhousespanstore
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
numDiscardedSpans = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "jaeger_clickhouse_discarded_spans",
|
||||
Help: "Count of spans that have been discarded due to pending writes exceeding max_span_count",
|
||||
})
|
||||
numPendingSpans = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "jaeger_clickhouse_pending_spans",
|
||||
Help: "Number of spans that are currently pending, counts against max_span_count",
|
||||
})
|
||||
)
|
||||
|
||||
// WriteWorkerPool is a worker pool for writing batches of spans.
// Given a new batch, WriteWorkerPool creates a new WriteWorker.
// If accepting a batch would push the number of pending spans above maxSpanCount, the batch is discarded.
|
||||
type WriteWorkerPool struct {
|
||||
params *WorkerParams
|
||||
|
||||
finish chan bool
|
||||
done sync.WaitGroup
|
||||
batches chan []*model.Span
|
||||
|
||||
maxSpanCount int
|
||||
mutex sync.Mutex
|
||||
workers workerHeap
|
||||
workerDone chan *WriteWorker
|
||||
}
|
||||
|
||||
var registerPoolMetrics sync.Once
|
||||
|
||||
func NewWorkerPool(params *WorkerParams, maxSpanCount int) WriteWorkerPool {
|
||||
registerPoolMetrics.Do(func() {
|
||||
prometheus.MustRegister(numDiscardedSpans, numPendingSpans)
|
||||
})
|
||||
|
||||
return WriteWorkerPool{
|
||||
params: params,
|
||||
finish: make(chan bool),
|
||||
done: sync.WaitGroup{},
|
||||
batches: make(chan []*model.Span),
|
||||
|
||||
mutex: sync.Mutex{},
|
||||
workers: newWorkerHeap(100),
|
||||
workerDone: make(chan *WriteWorker),
|
||||
|
||||
maxSpanCount: maxSpanCount,
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *WriteWorkerPool) Work() {
|
||||
finish := false
|
||||
nextWorkerID := int32(1)
|
||||
pendingSpanCount := 0
|
||||
for {
|
||||
// Initialize to zero, or update value from previous loop
|
||||
numPendingSpans.Set(float64(pendingSpanCount))
|
||||
|
||||
pool.done.Add(1)
|
||||
select {
|
||||
case batch := <-pool.batches:
|
||||
batchSize := len(batch)
|
||||
if pool.checkLimit(pendingSpanCount, batchSize) {
|
||||
// Limit disabled or batch fits within limit, write the batch.
|
||||
worker := WriteWorker{
|
||||
workerID: nextWorkerID,
|
||||
|
||||
params: pool.params,
|
||||
batch: batch,
|
||||
|
||||
finish: make(chan bool),
|
||||
workerDone: pool.workerDone,
|
||||
done: sync.WaitGroup{},
|
||||
}
|
||||
if nextWorkerID == math.MaxInt32 {
|
||||
nextWorkerID = 1
|
||||
} else {
|
||||
nextWorkerID++
|
||||
}
|
||||
pool.workers.AddWorker(&worker)
|
||||
pendingSpanCount += batchSize
|
||||
go worker.Work()
|
||||
} else {
|
||||
// Limit exceeded: discard the batch and count the discarded spans
|
||||
numDiscardedSpans.Add(float64(batchSize))
|
||||
pool.params.logger.Error("Discarding batch of spans due to exceeding pending span count", "batch_size", batchSize, "pending_span_count", pendingSpanCount, "max_span_count", pool.maxSpanCount)
|
||||
}
|
||||
case worker := <-pool.workerDone:
|
||||
// The worker has finished, subtract its work from the count and clean it from the heap.
|
||||
pendingSpanCount -= len(worker.batch)
|
||||
if err := pool.workers.RemoveWorker(worker); err != nil {
|
||||
pool.params.logger.Error("could not remove worker", "worker", worker, "error", err)
|
||||
}
|
||||
case <-pool.finish:
|
||||
pool.workers.CloseWorkers()
|
||||
finish = true
|
||||
}
|
||||
pool.done.Done()
|
||||
|
||||
if finish {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *WriteWorkerPool) WriteBatch(batch []*model.Span) {
|
||||
pool.batches <- batch
|
||||
}
|
||||
|
||||
func (pool *WriteWorkerPool) Close() {
|
||||
pool.finish <- true
|
||||
pool.done.Wait()
|
||||
}
|
||||
|
||||
// checkLimit returns whether batchSize fits within the maxSpanCount
|
||||
func (pool *WriteWorkerPool) checkLimit(pendingSpanCount int, batchSize int) bool {
|
||||
if pool.maxSpanCount <= 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Accept the batch only if it keeps the pending span count within the limit
|
||||
return pendingSpanCount+batchSize <= pool.maxSpanCount
|
||||
}
|
|
@ -10,10 +10,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
"github.com/jaegertracing/jaeger/storage/spanstore"
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -31,20 +30,24 @@ var (
|
|||
// TraceReader for reading spans from ClickHouse
|
||||
type TraceReader struct {
|
||||
db *sql.DB
|
||||
operationsTable string
|
||||
indexTable string
|
||||
spansTable string
|
||||
operationsTable TableName
|
||||
indexTable TableName
|
||||
spansTable TableName
|
||||
tenant string
|
||||
maxNumSpans uint
|
||||
}
|
||||
|
||||
var _ spanstore.Reader = (*TraceReader)(nil)
|
||||
|
||||
// NewTraceReader returns a TraceReader for the database
|
||||
func NewTraceReader(db *sql.DB, operationsTable, indexTable, spansTable string) *TraceReader {
|
||||
func NewTraceReader(db *sql.DB, operationsTable, indexTable, spansTable TableName, tenant string, maxNumSpans uint) *TraceReader {
|
||||
return &TraceReader{
|
||||
db: db,
|
||||
operationsTable: operationsTable,
|
||||
indexTable: indexTable,
|
||||
spansTable: spansTable,
|
||||
tenant: tenant,
|
||||
maxNumSpans: maxNumSpans,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -58,19 +61,29 @@ func (r *TraceReader) getTraces(ctx context.Context, traceIDs []model.TraceID) (
|
|||
span, _ := opentracing.StartSpanFromContext(ctx, "getTraces")
|
||||
defer span.Finish()
|
||||
|
||||
values := make([]interface{}, len(traceIDs))
|
||||
args := make([]interface{}, len(traceIDs))
|
||||
for i, traceID := range traceIDs {
|
||||
values[i] = traceID.String()
|
||||
args[i] = traceID.String()
|
||||
}
|
||||
|
||||
// It's more efficient to do PREWHERE on traceID to then only read needed models:
|
||||
// It's more efficient to do PREWHERE on traceID to then only read the needed models:
|
||||
// * https://clickhouse.tech/docs/en/sql-reference/statements/select/prewhere/
|
||||
query := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (%s)", r.spansTable, "?"+strings.Repeat(",?", len(values)-1))
|
||||
//nolint:gosec , G201: SQL string formatting
|
||||
query := fmt.Sprintf("SELECT model FROM %s PREWHERE traceID IN (%s)", r.spansTable, "?"+strings.Repeat(",?", len(traceIDs)-1))
|
||||
|
||||
if r.tenant != "" {
|
||||
query += " AND tenant = ?"
|
||||
args = append(args, r.tenant)
|
||||
}
|
||||
|
||||
if r.maxNumSpans > 0 {
|
||||
query += fmt.Sprintf(" ORDER BY timestamp LIMIT %d BY traceID", r.maxNumSpans)
|
||||
}
|
||||
|
||||
span.SetTag("db.statement", query)
|
||||
span.SetTag("db.args", values)
|
||||
span.SetTag("db.args", args)
|
||||
|
||||
rows, err := r.db.QueryContext(ctx, query, values...)
|
||||
rows, err := r.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -82,7 +95,8 @@ func (r *TraceReader) getTraces(ctx context.Context, traceIDs []model.TraceID) (
|
|||
for rows.Next() {
|
||||
var serialized string
|
||||
|
||||
if err := rows.Scan(&serialized); err != nil {
|
||||
err = rows.Scan(&serialized)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -143,7 +157,7 @@ func (r *TraceReader) getStrings(ctx context.Context, sql string, args ...interf
|
|||
|
||||
defer rows.Close()
|
||||
|
||||
values := []string{}
|
||||
values := make([]string, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var value string
|
||||
|
@ -169,11 +183,19 @@ func (r *TraceReader) GetServices(ctx context.Context) ([]string, error) {
|
|||
return nil, errNoOperationsTable
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("SELECT service FROM %s GROUP BY service", r.operationsTable)
|
||||
query := fmt.Sprintf("SELECT service FROM %s", r.operationsTable)
|
||||
args := make([]interface{}, 0)
|
||||
|
||||
if r.tenant != "" {
|
||||
query += " WHERE tenant = ?"
|
||||
args = append(args, r.tenant)
|
||||
}
|
||||
|
||||
query += " GROUP BY service"
|
||||
span.SetTag("db.statement", query)
|
||||
span.SetTag("db.args", args)
|
||||
|
||||
return r.getStrings(ctx, query)
|
||||
return r.getStrings(ctx, query, args...)
|
||||
}
|
||||
|
||||
// GetOperations fetches operations for a service, returning an empty slice if the service does not exist
|
||||
|
@ -188,20 +210,44 @@ func (r *TraceReader) GetOperations(
|
|||
return nil, errNoOperationsTable
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("SELECT operation FROM %s WHERE service = ? GROUP BY operation", r.operationsTable)
|
||||
args := []interface{}{params.ServiceName}
|
||||
//nolint:gosec , G201: SQL string formatting
|
||||
query := fmt.Sprintf("SELECT operation, spankind FROM %s WHERE", r.operationsTable)
|
||||
args := make([]interface{}, 0)
|
||||
|
||||
if r.tenant != "" {
|
||||
query += " tenant = ? AND"
|
||||
args = append(args, r.tenant)
|
||||
}
|
||||
|
||||
query += " service = ? GROUP BY operation, spankind ORDER BY operation"
|
||||
args = append(args, params.ServiceName)
|
||||
|
||||
span.SetTag("db.statement", query)
|
||||
span.SetTag("db.args", args)
|
||||
|
||||
names, err := r.getStrings(ctx, query, args...)
|
||||
rows, err := r.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
operations := make([]spanstore.Operation, len(names))
|
||||
for i, name := range names {
|
||||
operations[i].Name = name
|
||||
defer rows.Close()
|
||||
|
||||
operations := make([]spanstore.Operation, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var name, spanKind string
|
||||
if err := rows.Scan(&name, &spanKind); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
operation := spanstore.Operation{Name: name}
|
||||
if spanKind != "" {
|
||||
operation.SpanKind = spanKind
|
||||
}
|
||||
operations = append(operations, operation)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return operations, nil
|
||||
|
@ -249,7 +295,7 @@ func (r *TraceReader) FindTraceIDs(ctx context.Context, params *spanstore.TraceQ
|
|||
timeSpan = minTimespanForProgressiveSearch
|
||||
}
|
||||
|
||||
found := []model.TraceID{}
|
||||
found := make([]model.TraceID, 0)
|
||||
|
||||
for step := 0; step < maxProgressiveSteps; step++ {
|
||||
if len(found) >= params.NumTraces {
|
||||
|
@ -278,7 +324,7 @@ func (r *TraceReader) FindTraceIDs(ctx context.Context, params *spanstore.TraceQ
|
|||
found = append(found, foundInRange...)
|
||||
|
||||
end = start
|
||||
timeSpan = timeSpan * 2
|
||||
timeSpan *= 2
|
||||
}
|
||||
|
||||
return found, nil
|
||||
|
@ -288,7 +334,7 @@ func (r *TraceReader) findTraceIDsInRange(ctx context.Context, params *spanstore
|
|||
span, ctx := opentracing.StartSpanFromContext(ctx, "findTraceIDsInRange")
|
||||
defer span.Finish()
|
||||
|
||||
if end.Before(start) || end.UTC() == start.UTC() {
|
||||
if end.Before(start) || end == start {
|
||||
return []model.TraceID{}, nil
|
||||
}
|
||||
|
||||
|
@ -301,34 +347,36 @@ func (r *TraceReader) findTraceIDsInRange(ctx context.Context, params *spanstore
|
|||
query := fmt.Sprintf("SELECT DISTINCT traceID FROM %s WHERE service = ?", r.indexTable)
|
||||
args := []interface{}{params.ServiceName}
|
||||
|
||||
if r.tenant != "" {
|
||||
query += " AND tenant = ?"
|
||||
args = append(args, r.tenant)
|
||||
}
|
||||
|
||||
if params.OperationName != "" {
|
||||
query = query + " AND operation = ?"
|
||||
query += " AND operation = ?"
|
||||
args = append(args, params.OperationName)
|
||||
}
|
||||
|
||||
query = query + " AND -toUnixTimestamp(timestamp) <= -toUnixTimestamp(?)"
|
||||
args = append(args, start.UTC().Format("2006-01-02T15:04:05"))
|
||||
|
||||
query = query + " AND -toUnixTimestamp(timestamp) >= -toUnixTimestamp(?)"
|
||||
args = append(args, end.UTC().Format("2006-01-02T15:04:05"))
|
||||
query += " AND timestamp >= ? AND timestamp <= ?"
|
||||
args = append(args, start, end)
|
||||
|
||||
if params.DurationMin != 0 {
|
||||
query = query + " AND durationUs >= ?"
|
||||
query += " AND durationUs >= ?"
|
||||
args = append(args, params.DurationMin.Microseconds())
|
||||
}
|
||||
|
||||
if params.DurationMax != 0 {
|
||||
query = query + " AND durationUs <= ?"
|
||||
query += " AND durationUs <= ?"
|
||||
args = append(args, params.DurationMax.Microseconds())
|
||||
}
|
||||
|
||||
for key, value := range params.Tags {
|
||||
query = query + " AND has(tags, ?)"
|
||||
args = append(args, fmt.Sprintf("%s=%s", key, value))
|
||||
query += " AND has(tags.key, ?) AND has(splitByChar(',', tags.value[indexOf(tags.key, ?)]), ?)"
|
||||
args = append(args, key, key, value)
|
||||
}
|
||||
|
||||
if len(skip) > 0 {
|
||||
query = query + fmt.Sprintf(" AND traceID NOT IN (%s)", "?"+strings.Repeat(",?", len(skip)-1))
|
||||
query += fmt.Sprintf(" AND traceID NOT IN (%s)", "?"+strings.Repeat(",?", len(skip)-1))
|
||||
for _, traceID := range skip {
|
||||
args = append(args, traceID.String())
|
||||
}
|
||||
|
@ -336,7 +384,7 @@ func (r *TraceReader) findTraceIDsInRange(ctx context.Context, params *spanstore
|
|||
|
||||
// Sorting by service is required for early termination of primary key scan:
|
||||
// * https://github.com/ClickHouse/ClickHouse/issues/7102
|
||||
query = query + " ORDER BY service, -toUnixTimestamp(timestamp) LIMIT ?"
|
||||
query += " ORDER BY service, timestamp DESC LIMIT ?"
|
||||
args = append(args, params.NumTraces-len(skip))
|
||||
|
||||
span.SetTag("db.statement", query)
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
package clickhousespanstore
|
||||
|
||||
type TableName string
|
||||
|
||||
func (tableName TableName) ToLocal() TableName {
|
||||
return tableName + "_local"
|
||||
}
|
|
@ -0,0 +1,13 @@
|
|||
package clickhousespanstore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTableName_ToLocal(t *testing.T) {
|
||||
tableName := TableName("some_table")
|
||||
assert.Equal(t, tableName+"_local", tableName.ToLocal())
|
||||
|
||||
}
|
|
@ -0,0 +1,274 @@
|
|||
package clickhousespanstore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/jaegertracing/jaeger/model"
|
||||
)
|
||||
|
||||
var delays = []int{2, 3, 5, 8}
|
||||
|
||||
// WriteWorker writes spans to ClickHouse.
// Given a batch of spans, WriteWorker attempts to write them to the database.
// The interval between attempts follows the delays slice as a multiplier of the configured delay, then stays at the slice's last value.
|
||||
type WriteWorker struct {
|
||||
// workerID is an arbitrary identifier for keeping track of this worker in logs
|
||||
workerID int32
|
||||
params *WorkerParams
|
||||
batch []*model.Span
|
||||
finish chan bool
|
||||
workerDone chan *WriteWorker
|
||||
done sync.WaitGroup
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) Work() {
|
||||
worker.done.Add(1)
|
||||
|
||||
defer worker.done.Done()
|
||||
|
||||
// TODO: look for a specific error (connection refused | database error)
|
||||
if err := worker.writeBatch(worker.batch); err != nil {
|
||||
worker.params.logger.Error("Could not write a batch of spans", "error", err, "worker_id", worker.workerID)
|
||||
} else {
|
||||
worker.close()
|
||||
return
|
||||
}
|
||||
attempt := 0
|
||||
for {
|
||||
currentDelay := worker.getCurrentDelay(&attempt, worker.params.delay)
|
||||
timer := time.After(currentDelay)
|
||||
select {
|
||||
case <-worker.finish:
|
||||
worker.close()
|
||||
return
|
||||
case <-timer:
|
||||
if err := worker.writeBatch(worker.batch); err != nil {
|
||||
worker.params.logger.Error("Could not write a batch of spans", "error", err, "worker_id", worker.workerID)
|
||||
} else {
|
||||
worker.close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) Close() {
|
||||
worker.finish <- true
|
||||
worker.done.Wait()
|
||||
}
|
||||
|
||||
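// getCurrentDelay returns the next retry interval: the configured delay multiplied by the
// current entry of the delays slice, advancing through the slice until its last entry.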
func (worker *WriteWorker) getCurrentDelay(attempt *int, delay time.Duration) time.Duration {
|
||||
if *attempt < len(delays) {
|
||||
*attempt++
|
||||
}
|
||||
return time.Duration(int64(delays[*attempt-1]) * delay.Nanoseconds())
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) close() {
|
||||
worker.workerDone <- worker
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) writeBatch(batch []*model.Span) error {
|
||||
worker.params.logger.Debug("Writing spans", "size", len(batch))
|
||||
if err := worker.writeModelBatch(batch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if worker.params.indexTable != "" {
|
||||
if err := worker.writeIndexBatch(batch); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) writeModelBatch(batch []*model.Span) error {
|
||||
tx, err := worker.params.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
committed := false
|
||||
|
||||
defer func() {
|
||||
if !committed {
|
||||
// ClickHouse does not support real rollback
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
var query string
|
||||
if worker.params.tenant == "" {
|
||||
query = fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", worker.params.spansTable)
|
||||
} else {
|
||||
query = fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)", worker.params.spansTable)
|
||||
}
|
||||
|
||||
statement, err := tx.Prepare(query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer statement.Close()
|
||||
|
||||
for _, span := range batch {
|
||||
var serialized []byte
|
||||
|
||||
if worker.params.encoding == EncodingJSON {
|
||||
serialized, err = json.Marshal(span)
|
||||
} else {
|
||||
serialized, err = proto.Marshal(span)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if worker.params.tenant == "" {
|
||||
_, err = statement.Exec(span.StartTime, span.TraceID.String(), serialized)
|
||||
} else {
|
||||
_, err = statement.Exec(worker.params.tenant, span.StartTime, span.TraceID.String(), serialized)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
committed = true
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (worker *WriteWorker) writeIndexBatch(batch []*model.Span) error {
|
||||
tx, err := worker.params.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
committed := false
|
||||
|
||||
defer func() {
|
||||
if !committed {
|
||||
// ClickHouse does not support real rollback
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
var query string
|
||||
if worker.params.tenant == "" {
|
||||
query = fmt.Sprintf(
|
||||
"INSERT INTO %s (timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?)",
|
||||
worker.params.indexTable,
|
||||
)
|
||||
} else {
|
||||
query = fmt.Sprintf(
|
||||
"INSERT INTO %s (tenant, timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
worker.params.indexTable,
|
||||
)
|
||||
}
|
||||
|
||||
statement, err := tx.Prepare(query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer statement.Close()
|
||||
|
||||
for _, span := range batch {
|
||||
keys, values := uniqueTagsForSpan(span)
|
||||
if worker.params.tenant == "" {
|
||||
_, err = statement.Exec(
|
||||
span.StartTime,
|
||||
span.TraceID.String(),
|
||||
span.Process.ServiceName,
|
||||
span.OperationName,
|
||||
uint64(span.Duration.Microseconds()),
|
||||
keys,
|
||||
values,
|
||||
)
|
||||
} else {
|
||||
_, err = statement.Exec(
|
||||
worker.params.tenant,
|
||||
span.StartTime,
|
||||
span.TraceID.String(),
|
||||
span.Process.ServiceName,
|
||||
span.OperationName,
|
||||
uint64(span.Duration.Microseconds()),
|
||||
keys,
|
||||
values,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
committed = true
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
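// uniqueTagsForSpan collects tags from the span itself, its process, and its log fields.
// It returns the sorted unique keys and, for each key, the deduplicated values joined by commas.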
func uniqueTagsForSpan(span *model.Span) (keys, values []string) {
|
||||
uniqueTags := make(map[string][]string, len(span.Tags)+len(span.Process.Tags))
|
||||
|
||||
for i := range span.Tags {
|
||||
key := tagKey(&span.GetTags()[i])
|
||||
uniqueTags[key] = append(uniqueTags[key], tagValue(&span.GetTags()[i]))
|
||||
}
|
||||
|
||||
for i := range span.Process.Tags {
|
||||
key := tagKey(&span.GetProcess().GetTags()[i])
|
||||
uniqueTags[key] = append(uniqueTags[key], tagValue(&span.GetProcess().GetTags()[i]))
|
||||
}
|
||||
|
||||
for _, event := range span.Logs {
|
||||
for i := range event.Fields {
|
||||
key := tagKey(&event.GetFields()[i])
|
||||
uniqueTags[key] = append(uniqueTags[key], tagValue(&event.GetFields()[i]))
|
||||
}
|
||||
}
|
||||
|
||||
keys = make([]string, 0, len(uniqueTags))
|
||||
for k := range uniqueTags {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
values = make([]string, 0, len(uniqueTags))
|
||||
for _, key := range keys {
|
||||
values = append(values, strings.Join(unique(uniqueTags[key]), ","))
|
||||
}
|
||||
|
||||
return keys, values
|
||||
}
|
||||
|
||||
func tagKey(kv *model.KeyValue) string {
|
||||
return kv.Key
|
||||
}
|
||||
|
||||
func tagValue(kv *model.KeyValue) string {
|
||||
return kv.AsString()
|
||||
}
|
||||
|
||||
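// unique removes duplicate entries from slice, preserving the original order.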
func unique(slice []string) []string {
|
||||
if len(slice) == 1 {
|
||||
return slice
|
||||
}
|
||||
|
||||
keys := make(map[string]bool)
|
||||
list := []string{}
|
||||
for _, entry := range slice {
|
||||
if _, value := keys[entry]; !value {
|
||||
keys[entry] = true
|
||||
list = append(list, entry)
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
|
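The flattening above is the core of the index schema: each distinct tag key becomes one sorted entry whose value is the comma-joined set of its distinct values. A minimal sketch of that behavior, assuming it sits inside package clickhousespanstore next to the test file below so the unexported helpers and the same imports are available:

func TestUniqueTagsForSpan_Sketch(t *testing.T) {
	span := model.Span{
		Process: model.NewProcess("svc", []model.KeyValue{model.Int64("shard", 3)}),
		Tags: []model.KeyValue{
			model.String("env", "prod"),
			model.String("env", "prod"),    // exact duplicate collapses to one value
			model.String("env", "staging"), // same key, second value is comma-joined
		},
	}
	keys, values := uniqueTagsForSpan(&span)
	assert.Equal(t, []string{"env", "shard"}, keys)       // keys come back sorted
	assert.Equal(t, []string{"prod,staging", "3"}, values) // values align with keys
}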
@@ -0,0 +1,552 @@
package clickhousespanstore

import (
	"database/sql"
	"database/sql/driver"
	"encoding/json"
	"fmt"
	"math/rand"
	"strconv"
	"testing"
	"time"

	sqlmock "github.com/DATA-DOG/go-sqlmock"
	"github.com/gogo/protobuf/proto"
	hclog "github.com/hashicorp/go-hclog"
	"github.com/jaegertracing/jaeger/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"
)

const (
	testTagCount      = 10
	testLogCount      = 5
	testLogFieldCount = 5
	testIndexTable    = "test_index_table"
	testSpansTable    = "test_spans_table"
	testTenant        = "test_tenant"
)

type expectation struct {
	preparation string
	execArgs    [][]driver.Value
}

var (
	errorMock = fmt.Errorf("error mock")
	process   = model.NewProcess("test_service", []model.KeyValue{model.String("test_process_key", "test_process_value")})
	testSpan  = model.Span{
		TraceID:       model.NewTraceID(1, 2),
		SpanID:        model.NewSpanID(3),
		OperationName: "GET /unit_test",
		StartTime:     testStartTime,
		Process:       process,
		Tags:          []model.KeyValue{model.String("test_string_key", "test_string_value"), model.Int64("test_int64_key", 4)},
		Logs:          []model.Log{{Timestamp: testStartTime, Fields: []model.KeyValue{model.String("test_log_key", "test_log_value")}}},
		Duration:      time.Minute,
	}
	testSpans             = []*model.Span{&testSpan}
	keys, values          = uniqueTagsForSpan(&testSpan)
	indexWriteExpectation = expectation{
		preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?)", testIndexTable),
		execArgs: [][]driver.Value{{
			testSpan.StartTime,
			testSpan.TraceID.String(),
			testSpan.Process.GetServiceName(),
			testSpan.OperationName,
			uint64(testSpan.Duration.Microseconds()),
			keys,
			values,
		}}}
	indexWriteExpectationTenant = expectation{
		preparation: fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, service, operation, durationUs, tags.key, tags.value) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", testIndexTable),
		execArgs: [][]driver.Value{{
			testTenant,
			testSpan.StartTime,
			testSpan.TraceID.String(),
			testSpan.Process.GetServiceName(),
			testSpan.OperationName,
			uint64(testSpan.Duration.Microseconds()),
			keys,
			values,
		}}}
	writeBatchLogs = []mocks.LogMock{{Msg: "Writing spans", Args: []interface{}{"size", len(testSpans)}}}
)

func TestSpanWriter_TagKeyValue(t *testing.T) {
	tests := map[string]struct {
		kv       model.KeyValue
		expected string
	}{
		"string value":       {kv: model.String("tag_key", "tag_string_value"), expected: "tag_string_value"},
		"true value":         {kv: model.Bool("tag_key", true), expected: "true"},
		"false value":        {kv: model.Bool("tag_key", false), expected: "false"},
		"positive int value": {kv: model.Int64("tag_key", 1203912), expected: "1203912"},
		"negative int value": {kv: model.Int64("tag_key", -1203912), expected: "-1203912"},
		"float value":        {kv: model.Float64("tag_key", 0.005009), expected: "0.005009"},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, tagValue(&test.kv), "Incorrect tag value string")
		})
	}
}

func TestSpanWriter_UniqueTagsForSpan(t *testing.T) {
	tests := map[string]struct {
		tags           []model.KeyValue
		processTags    []model.KeyValue
		logs           []model.Log
		expectedKeys   []string
		expectedValues []string
	}{
		"default": {
			tags:           []model.KeyValue{model.String("key2", "value")},
			processTags:    []model.KeyValue{model.Int64("key3", 412)},
			logs:           []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
			expectedKeys:   []string{"key1", "key2", "key3"},
			expectedValues: []string{"0.5", "value", "412"},
		},
		"repeating tags": {
			tags:           []model.KeyValue{model.String("key2", "value"), model.String("key2", "value")},
			processTags:    []model.KeyValue{model.Int64("key3", 412)},
			logs:           []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
			expectedKeys:   []string{"key1", "key2", "key3"},
			expectedValues: []string{"0.5", "value", "412"},
		},
		"repeating keys": {
			tags:           []model.KeyValue{model.String("key2", "value_a"), model.String("key2", "value_b")},
			processTags:    []model.KeyValue{model.Int64("key3", 412)},
			logs:           []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
			expectedKeys:   []string{"key1", "key2", "key3"},
			expectedValues: []string{"0.5", "value_a,value_b", "412"},
		},
		"repeating values": {
			tags:           []model.KeyValue{model.String("key2", "value"), model.Int64("key4", 412)},
			processTags:    []model.KeyValue{model.Int64("key3", 412)},
			logs:           []model.Log{{Fields: []model.KeyValue{model.Float64("key1", .5)}}},
			expectedKeys:   []string{"key1", "key2", "key3", "key4"},
			expectedValues: []string{"0.5", "value", "412", "412"},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			process := model.Process{Tags: test.processTags}
			span := model.Span{Tags: test.tags, Process: &process, Logs: test.logs}
			actualKeys, actualValues := uniqueTagsForSpan(&span)
			assert.Equal(t, test.expectedKeys, actualKeys)
			assert.Equal(t, test.expectedValues, actualValues)
		})
	}
}

func TestSpanWriter_General(t *testing.T) {
	spanJSON, err := json.Marshal(&testSpan)
	require.NoError(t, err)
	modelWriteExpectationJSON := getModelWriteExpectation(spanJSON, "")
	modelWriteExpectationJSONTenant := getModelWriteExpectation(spanJSON, testTenant)
	spanProto, err := proto.Marshal(&testSpan)
	require.NoError(t, err)
	modelWriteExpectationProto := getModelWriteExpectation(spanProto, "")
	modelWriteExpectationProtoTenant := getModelWriteExpectation(spanProto, testTenant)
	tests := map[string]struct {
		encoding     Encoding
		indexTable   TableName
		tenant       string
		spans        []*model.Span
		expectations []expectation
		action       func(writeWorker *WriteWorker, spans []*model.Span) error
		expectedLogs []mocks.LogMock
	}{
		"write index batch": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			spans:        testSpans,
			expectations: []expectation{indexWriteExpectation},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeIndexBatch(spans) },
		},
		"write index tenant batch": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			tenant:       testTenant,
			spans:        testSpans,
			expectations: []expectation{indexWriteExpectationTenant},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeIndexBatch(spans) },
		},
		"write model batch JSON": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationJSON},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
		},
		"write model tenant batch JSON": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			tenant:       testTenant,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationJSONTenant},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
		},
		"write model batch Proto": {
			encoding:     EncodingProto,
			indexTable:   testIndexTable,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationProto},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
		},
		"write model tenant batch Proto": {
			encoding:     EncodingProto,
			indexTable:   testIndexTable,
			tenant:       testTenant,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationProtoTenant},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeModelBatch(spans) },
		},
		"write batch no index JSON": {
			encoding:     EncodingJSON,
			indexTable:   "",
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationJSON},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch no index Proto": {
			encoding:     EncodingProto,
			indexTable:   "",
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationProto},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch JSON": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationJSON, indexWriteExpectation},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch tenant JSON": {
			encoding:     EncodingJSON,
			indexTable:   testIndexTable,
			tenant:       testTenant,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationJSONTenant, indexWriteExpectationTenant},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch Proto": {
			encoding:     EncodingProto,
			indexTable:   testIndexTable,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationProto, indexWriteExpectation},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch tenant Proto": {
			encoding:     EncodingProto,
			indexTable:   testIndexTable,
			tenant:       testTenant,
			spans:        testSpans,
			expectations: []expectation{modelWriteExpectationProtoTenant, indexWriteExpectationTenant},
			action:       func(writeWorker *WriteWorker, spans []*model.Span) error { return writeWorker.writeBatch(spans) },
			expectedLogs: writeBatchLogs,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			db, mock, err := mocks.GetDbMock()
			require.NoError(t, err, "an error was not expected when opening a stub database connection")
			defer db.Close()

			spyLogger := mocks.NewSpyLogger()
			worker := getWriteWorker(spyLogger, db, test.encoding, test.indexTable, test.tenant)

			for _, expectation := range test.expectations {
				mock.ExpectBegin()
				prep := mock.ExpectPrepare(expectation.preparation)
				for _, args := range expectation.execArgs {
					prep.ExpectExec().WithArgs(args...).WillReturnResult(sqlmock.NewResult(1, 1))
				}
				mock.ExpectCommit()
			}

			assert.NoError(t, test.action(&worker, test.spans))
			assert.NoError(t, mock.ExpectationsWereMet())
			spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
		})
	}
}

func TestSpanWriter_BeginError(t *testing.T) {
	tests := map[string]struct {
		action       func(writeWorker *WriteWorker) error
		expectedLogs []mocks.LogMock
	}{
		"write model batch": {action: func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) }},
		"write index batch": {action: func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) }},
		"write batch": {
			action:       func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			db, mock, err := mocks.GetDbMock()
			require.NoError(t, err, "an error was not expected when opening a stub database connection")
			defer db.Close()

			spyLogger := mocks.NewSpyLogger()
			writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, "")

			mock.ExpectBegin().WillReturnError(errorMock)

			assert.ErrorIs(t, test.action(&writeWorker), errorMock)
			assert.NoError(t, mock.ExpectationsWereMet())
			spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
		})
	}
}

func TestSpanWriter_PrepareError(t *testing.T) {
	spanJSON, err := json.Marshal(&testSpan)
	require.NoError(t, err)
	modelWriteExpectation := getModelWriteExpectation(spanJSON, "")
	modelWriteExpectationTenant := getModelWriteExpectation(spanJSON, testTenant)

	tests := map[string]struct {
		action       func(writeWorker *WriteWorker) error
		tenant       string
		expectation  expectation
		expectedLogs []mocks.LogMock
	}{
		"write model batch": {
			action:      func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) },
			expectation: modelWriteExpectation,
		},
		"write model tenant batch": {
			action:      func(writeWorker *WriteWorker) error { return writeWorker.writeModelBatch(testSpans) },
			tenant:      testTenant,
			expectation: modelWriteExpectationTenant,
		},
		"write index batch": {
			action:      func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) },
			expectation: indexWriteExpectation,
		},
		"write index tenant batch": {
			action:      func(writeWorker *WriteWorker) error { return writeWorker.writeIndexBatch(testSpans) },
			tenant:      testTenant,
			expectation: indexWriteExpectationTenant,
		},
		"write batch": {
			action:       func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
			expectation:  modelWriteExpectation,
			expectedLogs: writeBatchLogs,
		},
		"write tenant batch": {
			action:       func(writeWorker *WriteWorker) error { return writeWorker.writeBatch(testSpans) },
			tenant:       testTenant,
			expectation:  modelWriteExpectationTenant,
			expectedLogs: writeBatchLogs,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			db, mock, err := mocks.GetDbMock()
			require.NoError(t, err, "an error was not expected when opening a stub database connection")
			defer db.Close()

			spyLogger := mocks.NewSpyLogger()
			spanWriter := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, test.tenant)

			mock.ExpectBegin()
			mock.ExpectPrepare(test.expectation.preparation).WillReturnError(errorMock)
			mock.ExpectRollback()

			assert.ErrorIs(t, test.action(&spanWriter), errorMock)
			assert.NoError(t, mock.ExpectationsWereMet())
			spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
		})
	}
}

func TestSpanWriter_ExecError(t *testing.T) {
	spanJSON, err := json.Marshal(&testSpan)
	require.NoError(t, err)
	modelWriteExpectation := getModelWriteExpectation(spanJSON, "")
	modelWriteExpectationTenant := getModelWriteExpectation(spanJSON, testTenant)
	tests := map[string]struct {
		indexTable   TableName
		tenant       string
		expectations []expectation
		action       func(writer *WriteWorker) error
		expectedLogs []mocks.LogMock
	}{
		"write model batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{modelWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeModelBatch(testSpans) },
		},
		"write model tenant batch": {
			indexTable:   testIndexTable,
			tenant:       testTenant,
			expectations: []expectation{modelWriteExpectationTenant},
			action:       func(writer *WriteWorker) error { return writer.writeModelBatch(testSpans) },
		},
		"write index batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{indexWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeIndexBatch(testSpans) },
		},
		"write index tenant batch": {
			indexTable:   testIndexTable,
			tenant:       testTenant,
			expectations: []expectation{indexWriteExpectationTenant},
			action:       func(writer *WriteWorker) error { return writer.writeIndexBatch(testSpans) },
		},
		"write batch no index": {
			indexTable:   "",
			expectations: []expectation{modelWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
		"write batch": {
			indexTable:   testIndexTable,
			expectations: []expectation{modelWriteExpectation, indexWriteExpectation},
			action:       func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
		"write tenant batch": {
			indexTable:   testIndexTable,
			tenant:       testTenant,
			expectations: []expectation{modelWriteExpectationTenant, indexWriteExpectationTenant},
			action:       func(writer *WriteWorker) error { return writer.writeBatch(testSpans) },
			expectedLogs: writeBatchLogs,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			db, mock, err := mocks.GetDbMock()
			require.NoError(t, err, "an error was not expected when opening a stub database connection")
			defer db.Close()

			spyLogger := mocks.NewSpyLogger()
			writeWorker := getWriteWorker(spyLogger, db, EncodingJSON, testIndexTable, test.tenant)

			for i, expectation := range test.expectations {
				mock.ExpectBegin()
				prep := mock.ExpectPrepare(expectation.preparation)
				if i < len(test.expectations)-1 {
					for _, args := range expectation.execArgs {
						prep.ExpectExec().WithArgs(args...).WillReturnResult(sqlmock.NewResult(1, 1))
					}
					mock.ExpectCommit()
				} else {
					prep.ExpectExec().WithArgs(expectation.execArgs[0]...).WillReturnError(errorMock)
					mock.ExpectRollback()
				}
			}

			assert.ErrorIs(t, test.action(&writeWorker), errorMock)
			assert.NoError(t, mock.ExpectationsWereMet())
			spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, test.expectedLogs)
		})
	}
}

func getWriteWorker(spyLogger mocks.SpyLogger, db *sql.DB, encoding Encoding, indexTable TableName, tenant string) WriteWorker {
	return WriteWorker{
		params: &WorkerParams{
			logger:     spyLogger,
			db:         db,
			spansTable: testSpansTable,
			indexTable: indexTable,
			tenant:     tenant,
			encoding:   encoding,
		},
		workerDone: make(chan *WriteWorker),
	}
}

func generateRandomSpans(count int) []*model.Span {
	spans := make([]*model.Span, count)
	for i := 0; i < count; i++ {
		span := generateRandomSpan()
		spans[i] = &span
	}
	return spans
}

func generateRandomSpan() model.Span {
	processTags := generateRandomKeyValues(testTagCount)
	process := model.Process{
		ServiceName: "service" + strconv.FormatUint(rand.Uint64(), 10),
		Tags:        processTags,
	}
	span := model.Span{
		TraceID:       model.NewTraceID(rand.Uint64(), rand.Uint64()),
		SpanID:        model.NewSpanID(rand.Uint64()),
		OperationName: "operation" + strconv.FormatUint(rand.Uint64(), 10),
		StartTime:     getRandomTime(),
		Process:       &process,
		Tags:          generateRandomKeyValues(testTagCount),
		Logs:          generateRandomLogs(),
		Duration:      time.Unix(rand.Int63n(1<<32), 0).Sub(time.Unix(0, 0)),
	}
	return span
}

func generateRandomLogs() []model.Log {
	logs := make([]model.Log, 0, testLogCount)
	for i := 0; i < testLogCount; i++ {
		timestamp := getRandomTime()
		logs = append(logs, model.Log{Timestamp: timestamp, Fields: generateRandomKeyValues(testLogFieldCount)})
	}
	return logs
}

func getRandomTime() time.Time {
	return time.Unix(rand.Int63n(time.Now().Unix()), 0)
}

func generateRandomKeyValues(count int) []model.KeyValue {
	tags := make([]model.KeyValue, 0, count)
	for i := 0; i < count; i++ {
		key := "key" + strconv.FormatUint(rand.Uint64(), 16)
		value := "value" + strconv.FormatUint(rand.Uint64(), 16)
		kv := model.KeyValue{Key: key, VType: model.ValueType_STRING, VStr: value}
		tags = append(tags, kv)
	}

	return tags
}

func getModelWriteExpectation(spanJSON []byte, tenant string) expectation {
	if tenant == "" {
		return expectation{
			preparation: fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", testSpansTable),
			execArgs: [][]driver.Value{{
				testSpan.StartTime,
				testSpan.TraceID.String(),
				spanJSON,
			}},
		}
	} else {
		return expectation{
			preparation: fmt.Sprintf("INSERT INTO %s (tenant, timestamp, traceID, model) VALUES (?, ?, ?, ?)", testSpansTable),
			execArgs: [][]driver.Value{{
				tenant,
				testSpan.StartTime,
				testSpan.TraceID.String(),
				spanJSON,
			}},
		}
	}
}
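The tests above all lean on the same go-sqlmock choreography: expect a Begin, a Prepare matched by regexp, one Exec per argument set, then a Commit, and finally verify that every expectation was consumed. A stripped-down, self-contained version of that pattern, with a made-up table and argument purely for illustration:

func TestSqlmockChoreography_Sketch(t *testing.T) {
	db, mock, err := sqlmock.New()
	require.NoError(t, err)
	defer db.Close()

	// Script the exact conversation the code under test must have with the DB.
	mock.ExpectBegin()
	prep := mock.ExpectPrepare("INSERT INTO example_table") // matched as a regexp
	prep.ExpectExec().WithArgs("value").WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()

	// Drive database/sql through the scripted conversation.
	tx, err := db.Begin()
	require.NoError(t, err)
	stmt, err := tx.Prepare("INSERT INTO example_table (col) VALUES (?)")
	require.NoError(t, err)
	_, err = stmt.Exec("value")
	require.NoError(t, err)
	require.NoError(t, tx.Commit())
	assert.NoError(t, mock.ExpectationsWereMet())
}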
@@ -3,20 +3,13 @@ package clickhousespanstore

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/jaegertracing/jaeger/storage/spanstore"

	"github.com/gogo/protobuf/proto"
	"go.uber.org/zap"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/jaegertracing/jaeger/model"
	"github.com/jaegertracing/jaeger/storage/spanstore"
	"github.com/prometheus/client_golang/prometheus"
)
type Encoding string

@@ -28,45 +21,76 @@ const (
	EncodingProto Encoding = "protobuf"
)

var (
	numWritesWithBatchSize = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jaeger_clickhouse_writes_with_batch_size_total",
		Help: "Number of clickhouse writes due to batch size criteria",
	})
	numWritesWithFlushInterval = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jaeger_clickhouse_writes_with_flush_interval_total",
		Help: "Number of clickhouse writes due to flush interval criteria",
	})
)

// SpanWriter for writing spans to ClickHouse
type SpanWriter struct {
	logger     hclog.Logger
	db         *sql.DB
	indexTable string
	spansTable string
	encoding   Encoding
	delay      time.Duration
	size       int
	spans      chan *model.Span
	finish     chan bool
	done       sync.WaitGroup
	workerParams WorkerParams

	size   int64
	spans  chan *model.Span
	finish chan bool
	done   sync.WaitGroup
}

var registerWriterMetrics sync.Once
var _ spanstore.Writer = (*SpanWriter)(nil)

// NewSpanWriter returns a SpanWriter for the database
func NewSpanWriter(logger hclog.Logger, db *sql.DB, indexTable string, spansTable string, encoding Encoding, delay time.Duration, size int) *SpanWriter {
func NewSpanWriter(
	logger hclog.Logger,
	db *sql.DB,
	indexTable,
	spansTable TableName,
	tenant string,
	encoding Encoding,
	delay time.Duration,
	size int64,
	maxSpanCount int,
) *SpanWriter {
	writer := &SpanWriter{
		logger:     logger,
		db:         db,
		indexTable: indexTable,
		spansTable: spansTable,
		encoding:   encoding,
		delay:      delay,
		size:       size,
		spans:      make(chan *model.Span, size),
		finish:     make(chan bool),
		workerParams: WorkerParams{
			logger:     logger,
			db:         db,
			indexTable: indexTable,
			spansTable: spansTable,
			tenant:     tenant,
			encoding:   encoding,
			delay:      delay,
		},
		size:   size,
		spans:  make(chan *model.Span, size),
		finish: make(chan bool),
	}

	go writer.backgroundWriter()
	writer.registerMetrics()
	go writer.backgroundWriter(maxSpanCount)

	return writer
}

func (w *SpanWriter) backgroundWriter() {
func (w *SpanWriter) registerMetrics() {
	registerWriterMetrics.Do(func() {
		prometheus.MustRegister(numWritesWithBatchSize)
		prometheus.MustRegister(numWritesWithFlushInterval)
	})
}

func (w *SpanWriter) backgroundWriter(maxSpanCount int) {
	pool := NewWorkerPool(&w.workerParams, maxSpanCount)
	go pool.Work()
	batch := make([]*model.Span, 0, w.size)

	timer := time.After(w.delay)
	timer := time.After(w.workerParams.delay)
	last := time.Now()

	for {
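The two counters registered above only become useful once something scrapes them. A hypothetical sketch, not part of this diff, of serving the default Prometheus registry over HTTP at the plugin's default metrics_endpoint value:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Expose every counter registered with prometheus.MustRegister.
	http.Handle("/metrics", promhttp.Handler())
	// "localhost:9090" mirrors defaultMetricsEndpoint in the config below.
	_ = http.ListenAndServe("localhost:9090", nil)
}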
@@ -79,23 +103,33 @@ func (w *SpanWriter) backgroundWriter() {
		case span := <-w.spans:
			batch = append(batch, span)
			flush = len(batch) == cap(batch)
			if flush {
				w.workerParams.logger.Debug("Flush due to batch size", "size", len(batch))
				numWritesWithBatchSize.Inc()
			}
		case <-timer:
			timer = time.After(w.delay)
			flush = time.Since(last) > w.delay && len(batch) > 0
			timer = time.After(w.workerParams.delay)
			flush = time.Since(last) > w.workerParams.delay && len(batch) > 0
			if flush {
				w.workerParams.logger.Debug("Flush due to timer")
				numWritesWithFlushInterval.Inc()
			}
		case <-w.finish:
			finish = true
			flush = len(batch) > 0
			w.workerParams.logger.Debug("Finish channel")
		}

		if flush {
			if err := w.writeBatch(batch); err != nil {
				w.logger.Error("Could not write a batch of spans", zap.Error(err))
			}
			pool.WriteBatch(batch)

			batch = make([]*model.Span, 0, w.size)
			last = time.Now()
		}

		if finish {
			pool.Close()
		}
		w.done.Done()

		if finish {
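The select loop above flushes on whichever comes first, batch capacity or flush interval, and the finish signal forces a final drain. A self-contained toy version of that policy, with illustrative sizes and delays:

package main

import (
	"fmt"
	"time"
)

func main() {
	items := make(chan int)
	go func() {
		for i := 0; i < 7; i++ {
			items <- i
		}
		close(items) // stands in for the finish channel
	}()

	const size = 3
	const delay = 50 * time.Millisecond
	batch := make([]int, 0, size)
	timer := time.After(delay)
	for {
		select {
		case item, ok := <-items:
			if !ok {
				if len(batch) > 0 {
					fmt.Println("final flush:", batch)
				}
				return
			}
			batch = append(batch, item)
			if len(batch) == cap(batch) {
				fmt.Println("flush due to batch size:", batch)
				batch = batch[:0]
			}
		case <-timer:
			timer = time.After(delay)
			if len(batch) > 0 {
				fmt.Println("flush due to timer:", batch)
				batch = batch[:0]
			}
		}
	}
}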
@@ -104,107 +138,6 @@ func (w *SpanWriter) backgroundWriter() {
	}
}

func (w *SpanWriter) writeBatch(batch []*model.Span) error {
	if err := w.writeModelBatch(batch); err != nil {
		return err
	}

	if w.indexTable != "" {
		if err := w.writeIndexBatch(batch); err != nil {
			return err
		}
	}

	return nil
}

func (w *SpanWriter) writeModelBatch(batch []*model.Span) error {
	tx, err := w.db.Begin()
	if err != nil {
		return err
	}

	commited := false

	defer func() {
		if !commited {
			// Clickhouse does not support real rollback
			_ = tx.Rollback()
		}
	}()

	statement, err := tx.Prepare(fmt.Sprintf("INSERT INTO %s (timestamp, traceID, model) VALUES (?, ?, ?)", w.spansTable))
	if err != nil {
		return nil
	}

	defer statement.Close()

	for _, span := range batch {
		var serialized []byte

		if w.encoding == EncodingJSON {
			serialized, err = json.Marshal(span)
		} else {
			serialized, err = proto.Marshal(span)
		}

		if err != nil {
			return err
		}

		_, err = statement.Exec(span.StartTime, span.TraceID.String(), serialized)
		if err != nil {
			return err
		}
	}

	commited = true

	return tx.Commit()
}

func (w *SpanWriter) writeIndexBatch(batch []*model.Span) error {
	tx, err := w.db.Begin()
	if err != nil {
		return err
	}

	commited := false

	defer func() {
		if !commited {
			// Clickhouse does not support real rollback
			_ = tx.Rollback()
		}
	}()

	statement, err := tx.Prepare(fmt.Sprintf("INSERT INTO %s (timestamp, traceID, service, operation, durationUs, tags) VALUES (?, ?, ?, ?, ?, ?)", w.indexTable))
	if err != nil {
		return err
	}

	defer statement.Close()

	for _, span := range batch {
		_, err = statement.Exec(
			span.StartTime,
			span.TraceID.String(),
			span.Process.ServiceName,
			span.OperationName,
			span.Duration.Microseconds(),
			uniqueTagsForSpan(span),
		)
		if err != nil {
			return err
		}
	}

	commited = true

	return tx.Commit()
}

// WriteSpan writes the encoded span
func (w *SpanWriter) WriteSpan(_ context.Context, span *model.Span) error {
	w.spans <- span
@@ -217,43 +150,3 @@ func (w *SpanWriter) Close() error {
	w.done.Wait()
	return nil
}

func uniqueTagsForSpan(span *model.Span) []string {
	uniqueTags := make(map[string]struct{}, len(span.Tags)+len(span.Process.Tags))

	buf := &strings.Builder{}

	for _, kv := range span.Tags {
		uniqueTags[tagString(buf, &kv)] = struct{}{}
	}

	for _, kv := range span.Process.Tags {
		uniqueTags[tagString(buf, &kv)] = struct{}{}
	}

	for _, event := range span.Logs {
		for _, kv := range event.Fields {
			uniqueTags[tagString(buf, &kv)] = struct{}{}
		}
	}

	tags := make([]string, 0, len(uniqueTags))

	for kv := range uniqueTags {
		tags = append(tags, kv)
	}

	sort.Strings(tags)

	return tags
}

func tagString(buf *strings.Builder, kv *model.KeyValue) string {
	buf.Reset()

	buf.WriteString(kv.Key)
	buf.WriteByte('=')
	buf.WriteString(kv.AsString())

	return buf.String()
}
@@ -1,16 +1,151 @@
package storage

import "time"
import (
	"time"

	"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
)

type EncodingType string

const (
	defaultEncoding                 = JSONEncoding
	JSONEncoding       EncodingType = "json"
	ProtobufEncoding   EncodingType = "protobuf"
	defaultMaxSpanCount             = int(1e7)
	defaultBatchSize                = 10_000
	defaultBatchDelay               = time.Second * 5
	defaultUsername                 = "default"
	defaultDatabaseName             = "default"
	defaultMetricsEndpoint          = "localhost:9090"
	defaultMaxNumSpans              = 0

	defaultSpansTable      clickhousespanstore.TableName = "jaeger_spans"
	defaultSpansIndexTable clickhousespanstore.TableName = "jaeger_index"
	defaultOperationsTable clickhousespanstore.TableName = "jaeger_operations"
)

type Configuration struct {
	// ClickHouse address e.g. tcp://localhost:9000.
	Address string `yaml:"address"`
	// Directory with .sql files that are run at plugin startup.
	InitSQLScriptsDir string `yaml:"init_sql_scripts_dir"`
	// Batch write size. Default is 10_000.
	BatchWriteSize int `yaml:"batch_write_size"`
	BatchWriteSize int64 `yaml:"batch_write_size"`
	// Batch flush interval. Default is 5s.
	BatchFlushInterval time.Duration `yaml:"batch_flush_interval"`
	// Maximum number of spans that can be pending writes at a time.
	// New spans exceeding this limit will be discarded,
	// keeping memory in check if there are issues writing to ClickHouse.
	// Check the "jaeger_clickhouse_discarded_spans" metric to keep track of discards.
	// Default 10_000_000, or disable the limit entirely by setting to 0.
	MaxSpanCount int `yaml:"max_span_count"`
	// Encoding either json or protobuf. Default is json.
	Encoding string `yaml:"encoding"`
	Encoding EncodingType `yaml:"encoding"`
	// ClickHouse address e.g. localhost:9000.
	Address string `yaml:"address"`
	// Directory with .sql files to run at plugin startup, mainly for integration tests.
	// Depending on the value of init_tables, this can be run as a
	// replacement or supplement to creating default tables for span storage.
	// If init_tables is also enabled, the scripts in this directory will be run first.
	InitSQLScriptsDir string `yaml:"init_sql_scripts_dir"`
	// Whether to automatically attempt to create tables in ClickHouse.
	// By default, this is enabled if init_sql_scripts_dir is empty,
	// or disabled if init_sql_scripts_dir is provided.
	InitTables *bool `yaml:"init_tables"`
	// Indicates location of TLS certificate used to connect to database.
	CaFile string `yaml:"ca_file"`
	// Username for connection to database. Default is "default".
	Username string `yaml:"username"`
	// Password for connection to database.
	Password string `yaml:"password"`
	// Database name. Default is "default".
	Database string `yaml:"database"`
	// Endpoint for scraping prometheus metrics e.g. localhost:9090.
	MetricsEndpoint string `yaml:"metrics_endpoint"`
	// Whether to use SQL scripts supporting replication and sharding. Default false.
	Replication bool `yaml:"replication"`
	// If non-empty, enables multitenancy in SQL scripts, and assigns the tenant name for this instance.
	Tenant string `yaml:"tenant"`
	// Table with spans. Default "jaeger_spans_local" or "jaeger_spans" when replication is enabled.
	SpansTable clickhousespanstore.TableName `yaml:"spans_table"`
	// Span index table. Default "jaeger_index_local" or "jaeger_index" when replication is enabled.
	SpansIndexTable clickhousespanstore.TableName `yaml:"spans_index_table"`
	// Operations table. Default "jaeger_operations_local" or "jaeger_operations" when replication is enabled.
	OperationsTable clickhousespanstore.TableName `yaml:"operations_table"`
	spansArchiveTable clickhousespanstore.TableName
	// TTL for data in tables in days. If 0, no TTL is set. Default 0.
	TTLDays uint `yaml:"ttl"`
	// The maximum number of spans to fetch per trace. If 0, no limit is set. Default 0.
	MaxNumSpans uint `yaml:"max_num_spans"`
	// The maximum number of open connections to the database. Default is unlimited (see: https://pkg.go.dev/database/sql#DB.SetMaxOpenConns)
	MaxOpenConns *uint `yaml:"max_open_conns"`
	// The maximum number of database connections in the idle connection pool. Default 2. (see: https://pkg.go.dev/database/sql#DB.SetMaxIdleConns)
	MaxIdleConns *uint `yaml:"max_idle_conns"`
	// The maximum number of milliseconds a database connection may be reused. Default = connections are never closed due to age (see: https://pkg.go.dev/database/sql#DB.SetConnMaxLifetime)
	ConnMaxLifetimeMillis *uint `yaml:"conn_max_lifetime_millis"`
	// The maximum number of milliseconds a database connection may be idle. Default = connections are never closed due to idle time (see: https://pkg.go.dev/database/sql#DB.SetConnMaxIdleTime)
	ConnMaxIdleTimeMillis *uint `yaml:"conn_max_idle_time_millis"`
}

func (cfg *Configuration) setDefaults() {
	if cfg.BatchWriteSize == 0 {
		cfg.BatchWriteSize = defaultBatchSize
	}
	if cfg.BatchFlushInterval == 0 {
		cfg.BatchFlushInterval = defaultBatchDelay
	}
	if cfg.MaxSpanCount == 0 {
		cfg.MaxSpanCount = defaultMaxSpanCount
	}
	if cfg.Encoding == "" {
		cfg.Encoding = defaultEncoding
	}
	if cfg.InitTables == nil {
		// Decide whether to init tables based on whether a custom script path was provided
		var defaultInitTables bool
		if cfg.InitSQLScriptsDir == "" {
			defaultInitTables = true
		} else {
			defaultInitTables = false
		}
		cfg.InitTables = &defaultInitTables
	}
	if cfg.Username == "" {
		cfg.Username = defaultUsername
	}
	if cfg.Database == "" {
		cfg.Database = defaultDatabaseName
	}
	if cfg.MetricsEndpoint == "" {
		cfg.MetricsEndpoint = defaultMetricsEndpoint
	}
	if cfg.MaxNumSpans == 0 {
		cfg.MaxNumSpans = defaultMaxNumSpans
	}
	if cfg.SpansTable == "" {
		if cfg.Replication {
			cfg.SpansTable = defaultSpansTable
			cfg.spansArchiveTable = defaultSpansTable + "_archive"
		} else {
			cfg.SpansTable = defaultSpansTable.ToLocal()
			cfg.spansArchiveTable = (defaultSpansTable + "_archive").ToLocal()
		}
	} else {
		cfg.spansArchiveTable = cfg.SpansTable + "_archive"
	}
	if cfg.SpansIndexTable == "" {
		if cfg.Replication {
			cfg.SpansIndexTable = defaultSpansIndexTable
		} else {
			cfg.SpansIndexTable = defaultSpansIndexTable.ToLocal()
		}
	}
	if cfg.OperationsTable == "" {
		if cfg.Replication {
			cfg.OperationsTable = defaultOperationsTable
		} else {
			cfg.OperationsTable = defaultOperationsTable.ToLocal()
		}
	}
}

func (cfg *Configuration) GetSpansArchiveTable() clickhousespanstore.TableName {
	return cfg.spansArchiveTable
}
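Since setDefaults fills in everything left unset, a working configuration can stay small. A hypothetical in-package example, with every value chosen purely for illustration:

func exampleConfig() Configuration {
	cfg := Configuration{
		Address:            "localhost:9000",
		Encoding:           JSONEncoding,
		BatchWriteSize:     10_000,
		BatchFlushInterval: 5 * time.Second,
		Replication:        true, // use distributed tables: jaeger_spans, jaeger_index, ...
		TTLDays:            7,    // expire span data after a week
	}
	cfg.setDefaults() // Username, Database, table names, etc. fall back to the defaults above
	return cfg
}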
@@ -0,0 +1,128 @@
package storage

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
)

func TestSetDefaults(t *testing.T) {
	tests := map[string]struct {
		replication bool
		getField    func(Configuration) interface{}
		expected    interface{}
	}{
		"username": {
			getField: func(config Configuration) interface{} { return config.Username },
			expected: defaultUsername,
		},
		"database name": {
			getField: func(config Configuration) interface{} { return config.Database },
			expected: defaultDatabaseName,
		},
		"encoding": {
			getField: func(config Configuration) interface{} { return config.Encoding },
			expected: defaultEncoding,
		},
		"batch write size": {
			getField: func(config Configuration) interface{} { return config.BatchWriteSize },
			expected: defaultBatchSize,
		},
		"batch flush interval": {
			getField: func(config Configuration) interface{} { return config.BatchFlushInterval },
			expected: defaultBatchDelay,
		},
		"max span count": {
			getField: func(config Configuration) interface{} { return config.MaxSpanCount },
			expected: defaultMaxSpanCount,
		},
		"metrics endpoint": {
			getField: func(config Configuration) interface{} { return config.MetricsEndpoint },
			expected: defaultMetricsEndpoint,
		},
		"spans table name local": {
			getField: func(config Configuration) interface{} { return config.SpansTable },
			expected: defaultSpansTable.ToLocal(),
		},
		"spans table name replication": {
			replication: true,
			getField:    func(config Configuration) interface{} { return config.SpansTable },
			expected:    defaultSpansTable,
		},
		"index table name local": {
			getField: func(config Configuration) interface{} { return config.SpansIndexTable },
			expected: defaultSpansIndexTable.ToLocal(),
		},
		"index table name replication": {
			replication: true,
			getField:    func(config Configuration) interface{} { return config.SpansIndexTable },
			expected:    defaultSpansIndexTable,
		},
		"operations table name local": {
			getField: func(config Configuration) interface{} { return config.OperationsTable },
			expected: defaultOperationsTable.ToLocal(),
		},
		"operations table name replication": {
			replication: true,
			getField:    func(config Configuration) interface{} { return config.OperationsTable },
			expected:    defaultOperationsTable,
		},
		"max number spans": {
			getField: func(config Configuration) interface{} { return config.MaxNumSpans },
			expected: defaultMaxNumSpans,
		},
	}

	for name, test := range tests {
		t.Run(fmt.Sprintf("default %s", name), func(t *testing.T) {
			config := Configuration{Replication: test.replication}
			config.setDefaults()
			assert.EqualValues(t, test.expected, test.getField(config))
		})
	}
}

func TestConfiguration_GetSpansArchiveTable(t *testing.T) {
	tests := map[string]struct {
		config                        Configuration
		expectedSpansArchiveTableName clickhousespanstore.TableName
	}{
		"default_config_local":       {config: Configuration{}, expectedSpansArchiveTableName: (defaultSpansTable + "_archive").ToLocal()},
		"default_config_replication": {config: Configuration{Replication: true}, expectedSpansArchiveTableName: defaultSpansTable + "_archive"},
		"custom_spans_table":         {config: Configuration{SpansTable: "custom_table_name"}, expectedSpansArchiveTableName: "custom_table_name_archive"},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			test.config.setDefaults()
			assert.Equal(t, test.expectedSpansArchiveTableName, test.config.GetSpansArchiveTable())
		})
	}
}

func TestConfiguration_InitTables(test *testing.T) {
	// for pointers below
	t := true
	f := false
	tests := map[string]struct {
		config             Configuration
		expectedInitTables bool
	}{
		"scriptsempty_initnil":      {config: Configuration{}, expectedInitTables: true},
		"scriptsprovided_initnil":   {config: Configuration{InitSQLScriptsDir: "hello"}, expectedInitTables: false},
		"scriptsempty_inittrue":     {config: Configuration{InitTables: &t}, expectedInitTables: true},
		"scriptsprovided_inittrue":  {config: Configuration{InitSQLScriptsDir: "hello", InitTables: &t}, expectedInitTables: true},
		"scriptsempty_initfalse":    {config: Configuration{InitTables: &f}, expectedInitTables: false},
		"scriptsprovided_initfalse": {config: Configuration{InitSQLScriptsDir: "hello", InitTables: &f}, expectedInitTables: false},
	}

	for name, testcase := range tests {
		test.Run(name, func(t *testing.T) {
			testcase.config.setDefaults()
			assert.Equal(t, testcase.expectedInitTables, *(testcase.config.InitTables))
		})
	}
}
storage/store.go
@@ -1,146 +1,341 @@
package storage

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"embed"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"text/template"
	"time"

	_ "github.com/ClickHouse/clickhouse-go" // force SQL driver registration
	"github.com/hashicorp/go-hclog"
	clickhouse "github.com/ClickHouse/clickhouse-go/v2"
	hclog "github.com/hashicorp/go-hclog"
	"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
	"github.com/jaegertracing/jaeger/storage/dependencystore"
	"github.com/jaegertracing/jaeger/storage/spanstore"

	"github.com/pavolloffay/jaeger-clickhouse/storage/clickhousedependencystore"
	"github.com/pavolloffay/jaeger-clickhouse/storage/clickhousespanstore"
)

const (
	defaultBatchSize  = 10_000
	defaultBatchDelay = time.Second * 5
	jaegerclickhouse "github.com/jaegertracing/jaeger-clickhouse"
	"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousedependencystore"
	"github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
)

type Store struct {
	db     *sql.DB
	logger hclog.Logger
	cfg    Configuration
	db            *sql.DB
	writer        spanstore.Writer
	reader        spanstore.Reader
	archiveWriter spanstore.Writer
	archiveReader spanstore.Reader
}

var _ shared.StoragePlugin = (*Store)(nil)
var _ io.Closer = (*Store)(nil)
var (
	_ shared.StoragePlugin             = (*Store)(nil)
	_ shared.ArchiveStoragePlugin      = (*Store)(nil)
	_ shared.StreamingSpanWriterPlugin = (*Store)(nil)
	_ io.Closer                        = (*Store)(nil)
)
func NewStore(logger hclog.Logger, cfg Configuration, embeddedSQLScripts embed.FS) (*Store, error) {
	db, err := defaultConnector(cfg.Address)
func NewStore(logger hclog.Logger, cfg Configuration) (*Store, error) {
	cfg.setDefaults()
	db, err := connector(cfg)
	if err != nil {
		return nil, fmt.Errorf("could not connect to database: %q", err)
	}

	if err := initializeDB(db, cfg.InitSQLScriptsDir, embeddedSQLScripts); err != nil {
		db.Close()
	if err := runInitScripts(logger, db, cfg); err != nil {
		_ = db.Close()
		return nil, err
	}
	if cfg.BatchWriteSize == 0 {
		cfg.BatchWriteSize = defaultBatchSize
	}
	if cfg.BatchFlushInterval == 0 {
		cfg.BatchFlushInterval = defaultBatchDelay
	}
	if cfg.Encoding == "" {
		cfg.Encoding = string(clickhousespanstore.EncodingJSON)
	if cfg.Replication {
		return &Store{
			db: db,
			writer: clickhousespanstore.NewSpanWriter(
				logger,
				db,
				cfg.SpansIndexTable,
				cfg.SpansTable,
				cfg.Tenant,
				clickhousespanstore.Encoding(cfg.Encoding),
				cfg.BatchFlushInterval,
				cfg.BatchWriteSize,
				cfg.MaxSpanCount,
			),
			reader: clickhousespanstore.NewTraceReader(
				db,
				cfg.OperationsTable,
				cfg.SpansIndexTable,
				cfg.SpansTable,
				cfg.Tenant,
				cfg.MaxNumSpans,
			),
			archiveWriter: clickhousespanstore.NewSpanWriter(
				logger,
				db,
				"",
				cfg.GetSpansArchiveTable(),
				cfg.Tenant,
				clickhousespanstore.Encoding(cfg.Encoding),
				cfg.BatchFlushInterval,
				cfg.BatchWriteSize,
				cfg.MaxSpanCount,
			),
			archiveReader: clickhousespanstore.NewTraceReader(
				db,
				"",
				"",
				cfg.GetSpansArchiveTable(),
				cfg.Tenant,
				cfg.MaxNumSpans,
			),
		}, nil
	}
	return &Store{
		db:     db,
		logger: logger,
		cfg:    cfg,
		db: db,
		writer: clickhousespanstore.NewSpanWriter(
			logger,
			db,
			cfg.SpansIndexTable,
			cfg.SpansTable,
			cfg.Tenant,
			clickhousespanstore.Encoding(cfg.Encoding),
			cfg.BatchFlushInterval,
			cfg.BatchWriteSize,
			cfg.MaxSpanCount,
		),
		reader: clickhousespanstore.NewTraceReader(
			db,
			cfg.OperationsTable,
			cfg.SpansIndexTable,
			cfg.SpansTable,
			cfg.Tenant,
			cfg.MaxNumSpans,
		),
		archiveWriter: clickhousespanstore.NewSpanWriter(
			logger,
			db,
			"",
			cfg.GetSpansArchiveTable(),
			cfg.Tenant,
			clickhousespanstore.Encoding(cfg.Encoding),
			cfg.BatchFlushInterval,
			cfg.BatchWriteSize,
			cfg.MaxSpanCount,
		),
		archiveReader: clickhousespanstore.NewTraceReader(
			db,
			"",
			"",
			cfg.GetSpansArchiveTable(),
			cfg.Tenant,
			cfg.MaxNumSpans,
		),
	}, nil
}
func initializeDB(db *sql.DB, initSQLScriptsDir string, embeddedScripts embed.FS) error {
	var sqlStatements []string
	if initSQLScriptsDir != "" {
		filePaths, err := walkMatch(initSQLScriptsDir, "*.sql")
func connector(cfg Configuration) (*sql.DB, error) {
	var conn *sql.DB

	options := clickhouse.Options{
		Addr: []string{sanitize(cfg.Address)},
		Auth: clickhouse.Auth{
			Database: cfg.Database,
			Username: cfg.Username,
			Password: cfg.Password,
		},
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionLZ4,
		},
	}

	if cfg.CaFile != "" {
		caCert, err := os.ReadFile(cfg.CaFile)
		if err != nil {
			return nil, err
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		options.TLS = &tls.Config{
			RootCAs: caCertPool,
		}
	}
	conn = clickhouse.OpenDB(&options)

	if cfg.MaxOpenConns != nil {
		conn.SetMaxOpenConns(int(*cfg.MaxOpenConns))
	}
	if cfg.MaxIdleConns != nil {
		conn.SetMaxIdleConns(int(*cfg.MaxIdleConns))
	}
	if cfg.ConnMaxLifetimeMillis != nil {
		conn.SetConnMaxLifetime(time.Millisecond * time.Duration(*cfg.ConnMaxLifetimeMillis))
	}
	if cfg.ConnMaxIdleTimeMillis != nil {
		conn.SetConnMaxIdleTime(time.Millisecond * time.Duration(*cfg.ConnMaxIdleTimeMillis))
	}

	if err := conn.Ping(); err != nil {
		return nil, err
	}
	return conn, nil
}
type tableArgs struct {
	Database string

	SpansIndexTable   clickhousespanstore.TableName
	SpansTable        clickhousespanstore.TableName
	OperationsTable   clickhousespanstore.TableName
	SpansArchiveTable clickhousespanstore.TableName

	TTLTimestamp string
	TTLDate      string

	Multitenant bool
	Replication bool
}

type distributedTableArgs struct {
	Database string
	Table    clickhousespanstore.TableName
	Hash     string
}

func render(templates *template.Template, filename string, args interface{}) string {
	var statement strings.Builder
	err := templates.ExecuteTemplate(&statement, filename, args)
	if err != nil {
		panic(err)
	}
	return statement.String()
}
func runInitScripts(logger hclog.Logger, db *sql.DB, cfg Configuration) error {
	var (
		sqlStatements []string
		ttlTimestamp  string
		ttlDate       string
	)
	if cfg.TTLDays > 0 {
		ttlTimestamp = fmt.Sprintf("TTL timestamp + INTERVAL %d DAY DELETE", cfg.TTLDays)
		ttlDate = fmt.Sprintf("TTL date + INTERVAL %d DAY DELETE", cfg.TTLDays)
	}
	if cfg.InitSQLScriptsDir != "" {
		filePaths, err := walkMatch(cfg.InitSQLScriptsDir, "*.sql")
		if err != nil {
			return fmt.Errorf("could not list sql files: %q", err)
		}
		sort.Strings(filePaths)
		for _, f := range filePaths {
			sqlStatement, err := ioutil.ReadFile(f)
			sqlStatement, err := os.ReadFile(filepath.Clean(f))
			if err != nil {
				return err
			}
			sqlStatements = append(sqlStatements, string(sqlStatement))
		}
	} else {
		f, err := embeddedScripts.ReadFile("sqlscripts/0001-jaeger-index.sql")
		if err != nil {
			return err
		}
		sqlStatements = append(sqlStatements, string(f))
		f, err = embeddedScripts.ReadFile("sqlscripts/0002-jaeger-spans.sql")
		if err != nil {
			return err
		}
		sqlStatements = append(sqlStatements, string(f))
		f, err = embeddedScripts.ReadFile("sqlscripts/0003-jaeger-operations.sql")
		if err != nil {
			return err
		}
		sqlStatements = append(sqlStatements, string(f))
	}
	return executeScripts(sqlStatements, db)
	if *cfg.InitTables {
		templates := template.Must(template.ParseFS(jaegerclickhouse.SQLScripts, "sqlscripts/*.tmpl.sql"))

		args := tableArgs{
			Database: cfg.Database,

			SpansIndexTable:   cfg.SpansIndexTable,
			SpansTable:        cfg.SpansTable,
			OperationsTable:   cfg.OperationsTable,
			SpansArchiveTable: cfg.GetSpansArchiveTable(),

			TTLTimestamp: ttlTimestamp,
			TTLDate:      ttlDate,

			Multitenant: cfg.Tenant != "",
			Replication: cfg.Replication,
		}

		if cfg.Replication {
			// Add "_local" to the local table names, and omit it from the distributed tables below
			args.SpansIndexTable = args.SpansIndexTable.ToLocal()
			args.SpansTable = args.SpansTable.ToLocal()
			args.OperationsTable = args.OperationsTable.ToLocal()
			args.SpansArchiveTable = args.SpansArchiveTable.ToLocal()
		}

		sqlStatements = append(sqlStatements, render(templates, "jaeger-index.tmpl.sql", args))
		sqlStatements = append(sqlStatements, render(templates, "jaeger-operations.tmpl.sql", args))
		sqlStatements = append(sqlStatements, render(templates, "jaeger-spans.tmpl.sql", args))
		sqlStatements = append(sqlStatements, render(templates, "jaeger-spans-archive.tmpl.sql", args))

		if cfg.Replication {
			// Now these tables omit the "_local" suffix
			distargs := distributedTableArgs{
				Table:    cfg.SpansTable,
				Database: cfg.Database,
				Hash:     "cityHash64(traceID)",
			}
			sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))

			distargs.Table = cfg.SpansIndexTable
			sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))

			distargs.Table = cfg.GetSpansArchiveTable()
			sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))

			distargs.Table = cfg.OperationsTable
			distargs.Hash = "rand()"
			sqlStatements = append(sqlStatements, render(templates, "distributed-table.tmpl.sql", distargs))
		}
	}
	return executeScripts(logger, sqlStatements, db)
}
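runInitScripts drives table creation entirely through text/template rendering of the embedded *.tmpl.sql files. A minimal sketch of that mechanism with an invented template and TTL clause, not taken from the repository's sqlscripts:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// A made-up DDL template; the real ones live under sqlscripts/*.tmpl.sql.
	tmpl := template.Must(template.New("ddl").Parse(
		"CREATE TABLE IF NOT EXISTS {{.Table}} (timestamp DateTime) ENGINE = MergeTree() ORDER BY timestamp {{.TTLTimestamp}}\n"))
	args := struct {
		Table        string
		TTLTimestamp string
	}{"jaeger_spans_local", "TTL timestamp + INTERVAL 7 DAY DELETE"} // what TTLDays=7 would produce
	if err := tmpl.Execute(os.Stdout, args); err != nil {
		fmt.Println(err)
	}
}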
func (s *Store) SpanReader() spanstore.Reader {
	return clickhousespanstore.NewTraceReader(s.db, "jaeger_operations_v2", "jaeger_index_v2", "jaeger_spans_v2")
	return s.reader
}

func (s *Store) SpanWriter() spanstore.Writer {
	return clickhousespanstore.NewSpanWriter(s.logger, s.db, "jaeger_index_v2", "jaeger_spans_v2", clickhousespanstore.Encoding(s.cfg.Encoding), s.cfg.BatchFlushInterval, s.cfg.BatchWriteSize)
	return s.writer
}

func (s *Store) DependencyReader() dependencystore.Reader {
	return clickhousedependencystore.NewDependencyStore()
}

func (s *Store) ArchiveSpanReader() spanstore.Reader {
	return s.archiveReader
}

func (s *Store) ArchiveSpanWriter() spanstore.Writer {
	return s.archiveWriter
}

func (s *Store) StreamingSpanWriter() spanstore.Writer {
	return s.writer
}

func (s *Store) Close() error {
	return s.db.Close()
}

func defaultConnector(datasource string) (*sql.DB, error) {
	db, err := sql.Open("clickhouse", datasource)
	if err != nil {
		return nil, err
	}

	if err := db.Ping(); err != nil {
		return nil, err
	}

	return db, nil
}
func executeScripts(sqlStatements []string, db *sql.DB) error {
func executeScripts(logger hclog.Logger, sqlStatements []string, db *sql.DB) error {
    tx, err := db.Begin()
    if err != nil {
        return nil
        return err
    }
    committed := false
    defer func() {
        if !committed {
            tx.Rollback()
            _ = tx.Rollback()
        }
    }()

    for _, file := range sqlStatements {
        _, err = tx.Exec(file)
    for _, statement := range sqlStatements {
        logger.Debug("Running SQL statement", "statement", statement)
        _, err = tx.Exec(statement)
        if err != nil {
            return fmt.Errorf("could not run sql %q: %q", file, err)
            return fmt.Errorf("could not run sql %q: %q", statement, err)
        }
    }
    committed = true
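The hunk is truncated before the commit; note that the committed flag pattern above only works if the function ends with a successful Commit followed by committed = true. A generic, self-contained sketch of the complete pattern (not repo code):

// Generic transaction pattern for running a batch of statements atomically:
// roll back on any early return, commit exactly once at the end.
func runInTx(db *sql.DB, statements []string) error {
    tx, err := db.Begin()
    if err != nil {
        return err
    }
    committed := false
    defer func() {
        if !committed {
            _ = tx.Rollback() // only reached when Commit never succeeded
        }
    }()
    for _, s := range statements {
        if _, err := tx.Exec(s); err != nil {
            return err
        }
    }
    if err := tx.Commit(); err != nil {
        return err
    }
    committed = true
    return nil
}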
@ -168,3 +363,10 @@ func walkMatch(root, pattern string) ([]string, error) {
    }
    return matches, nil
}

// Earlier versions of clickhouse-go expected the address as tcp://host:port,
// while newer versions expect host:port (without a scheme), so to maintain
// backward compatibility we strip the prefix.
func sanitize(addr string) string {
    return strings.TrimPrefix(addr, "tcp://")
}

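A quick behavioral check for sanitize; this test is illustrative and not part of the diff.

// Hypothetical test: both address forms normalize to plain host:port.
func TestSanitize(t *testing.T) {
    assert.Equal(t, "clickhouse:9000", sanitize("tcp://clickhouse:9000"))
    assert.Equal(t, "clickhouse:9000", sanitize("clickhouse:9000"))
}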
@ -0,0 +1,187 @@
package storage

import (
    "database/sql"
    "fmt"
    "testing"

    sqlmock "github.com/DATA-DOG/go-sqlmock"
    hclog "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousedependencystore"
    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore"
    "github.com/jaegertracing/jaeger-clickhouse/storage/clickhousespanstore/mocks"
)

const (
    testIndexTable        = "test_index_table"
    testSpansTable        = "test_spans_table"
    testOperationsTable   = "test_operation_table"
    testSpansArchiveTable = "test_spans_archive_table"
)

var errorMock = fmt.Errorf("error mock")

func TestStore_SpanWriter(t *testing.T) {
    writer := clickhousespanstore.SpanWriter{}
    store := Store{
        writer: &writer,
    }
    assert.Equal(t, &writer, store.SpanWriter())
}

func TestStore_ArchiveSpanWriter(t *testing.T) {
    writer := clickhousespanstore.SpanWriter{}
    store := Store{
        archiveWriter: &writer,
    }
    assert.Equal(t, &writer, store.ArchiveSpanWriter())
}

func TestStore_SpanReader(t *testing.T) {
    reader := clickhousespanstore.TraceReader{}
    store := Store{
        reader: &reader,
    }
    assert.Equal(t, &reader, store.SpanReader())
}

func TestStore_ArchiveSpanReader(t *testing.T) {
    reader := clickhousespanstore.TraceReader{}
    store := Store{
        archiveReader: &reader,
    }
    assert.Equal(t, &reader, store.ArchiveSpanReader())
}

func TestStore_DependencyReader(t *testing.T) {
    store := Store{}
    assert.Equal(t, &clickhousedependencystore.DependencyStore{}, store.DependencyReader())
}

func TestStore_Close(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err)
    defer db.Close()

    logger := mocks.NewSpyLogger()
    store := newStore(db, logger)

    mock.ExpectClose()
    require.NoError(t, store.Close())
    assert.NoError(t, mock.ExpectationsWereMet())
    logger.AssertLogsEmpty(t)
}

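The tests above and below rely on mocks.GetDbMock from the repo's mocks package. A minimal stand-in, assuming it is a thin wrapper over DATA-DOG/go-sqlmock:

// Assumed shape of mocks.GetDbMock: hands back a mock *sql.DB plus the
// expectation recorder used by the tests.
func GetDbMock() (*sql.DB, sqlmock.Sqlmock, error) {
    return sqlmock.New()
}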
func newStore(db *sql.DB, logger mocks.SpyLogger) Store {
    return Store{
        db: db,
        writer: clickhousespanstore.NewSpanWriter(
            logger,
            db,
            testIndexTable,
            testSpansTable,
            "",
            clickhousespanstore.EncodingJSON,
            0,
            0,
            0,
        ),
        reader: clickhousespanstore.NewTraceReader(
            db,
            testOperationsTable,
            testIndexTable,
            testSpansTable,
            "",
            0,
        ),
        archiveWriter: clickhousespanstore.NewSpanWriter(
            logger,
            db,
            testIndexTable,
            testSpansArchiveTable,
            "",
            clickhousespanstore.EncodingJSON,
            0,
            0,
            0,
        ),
        archiveReader: clickhousespanstore.NewTraceReader(
            db,
            testOperationsTable,
            testIndexTable,
            testSpansArchiveTable,
            "",
            0,
        ),
    }
}

func TestStore_executeScripts(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err)
    defer db.Close()

    spyLogger := mocks.NewSpyLogger()
    scripts := []string{
        "first SQL script",
        "second_SQL_script",
    }

    mock.ExpectBegin()
    for _, script := range scripts {
        mock.ExpectExec(script).WillReturnResult(sqlmock.NewResult(1, 1))
    }
    mock.ExpectCommit()
    err = executeScripts(spyLogger, scripts, db)
    require.NoError(t, err)
    assert.NoError(t, mock.ExpectationsWereMet())
    spyLogger.AssertLogsOfLevelEqual(t, hclog.Debug, func() []mocks.LogMock {
        res := make([]mocks.LogMock, len(scripts))
        for i, script := range scripts {
            res[i] = mocks.LogMock{Msg: "Running SQL statement", Args: []interface{}{"statement", script}}
        }
        return res
    }())
}

func TestStore_executeScriptsExecuteError(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err)
    defer db.Close()

    spyLogger := mocks.NewSpyLogger()
    scripts := []string{
        "first SQL script",
        "second_SQL_script",
    }

    mock.ExpectBegin()
    mock.ExpectExec(scripts[0]).WillReturnError(errorMock)
    mock.ExpectRollback()
    err = executeScripts(spyLogger, scripts, db)
    assert.EqualError(t, err, fmt.Sprintf("could not run sql %q: %q", scripts[0], errorMock))
    spyLogger.AssertLogsOfLevelEqual(
        t,
        hclog.Debug,
        []mocks.LogMock{{Msg: "Running SQL statement", Args: []interface{}{"statement", scripts[0]}}},
    )
}

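A note on matching: go-sqlmock's default matcher treats the expected SQL as a regular expression, which is why plain strings such as "first SQL script" above match themselves. If exact string comparison is preferred, the mock can be constructed with the equality matcher:

// Optional: exact-match SQL expectations instead of the default regexp matcher.
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))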
func TestStore_executeScriptBeginError(t *testing.T) {
    db, mock, err := mocks.GetDbMock()
    require.NoError(t, err)
    defer db.Close()

    spyLogger := mocks.NewSpyLogger()
    scripts := []string{
        "first SQL script",
        "second_SQL_script",
    }

    mock.ExpectBegin().WillReturnError(errorMock)
    err = executeScripts(spyLogger, scripts, db)
    assert.EqualError(t, err, errorMock.Error())
}