mirror of https://github.com/thanos-io/thanos.git
Compare commits: v0.39.0-rc...main (67 commits)
```diff
@@ -9,6 +9,9 @@ run:
   # exit code when at least one issue was found, default is 1
   issues-exit-code: 1
 
+  build-tags:
+    - slicelabels
+
   # output configuration options
   output:
     # The formats used to render issues.
@@ -57,7 +60,7 @@ issues:
     # We don't check metrics naming in the tests.
     - path: _test\.go
       linters:
        - promlinter
     # These are not being checked since these methods exist
     # so that no one else could implement them.
    - linters:
```
```diff
@@ -6,7 +6,11 @@ build:
 binaries:
   - name: thanos
     path: ./cmd/thanos
-    flags: -a -tags netgo
+    flags: -a
+    tags:
+      - netgo
+      - slicelabels
 
     ldflags: |
       -X github.com/prometheus/common/version.Version={{.Version}}
       -X github.com/prometheus/common/version.Revision={{.Revision}}
```
CHANGELOG.md (34 lines changed)

```diff
@@ -8,7 +8,38 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan
 
 We use *breaking :warning:* to mark changes that are not backward compatible (relates only to v0.y.z releases.)
 
-### [v0.39.0-rc.0](https://github.com/thanos-io/thanos/tree/release-0.39) - 2025 06 19
+## Unreleased
+
+### Fixed
+
+### Added
+
+- [#8366](https://github.com/thanos-io/thanos/pull/8366) Store: optionally ignore Parquet migrated blocks
+- [#8359](https://github.com/thanos-io/thanos/pull/8359) Tools: add `--shipper.upload-compacted` flag for uploading compacted blocks to bucket upload-blocks
+
+### Changed
+
+- [#8370](https://github.com/thanos-io/thanos/pull/8370) Query: announced labelset now reflects relabel-config
+
+### Removed
+
+### [v0.39.2](https://github.com/thanos-io/thanos/tree/release-0.39) - 2025 07 17
+
+### Fixed
+
+- [#8374](https://github.com/thanos-io/thanos/pull/8374) Query: fix panic when concurrently accessing annotations map
+- [#8375](https://github.com/thanos-io/thanos/pull/8375) Query: fix native histogram buckets in distributed queries
+
+### [v0.39.1](https://github.com/thanos-io/thanos/tree/release-0.39) - 2025 07 01
+
+Fixes a memory leak issue on query-frontend. The bug only affects v0.39.0.
+
+### Fixed
+
+- [#8349](https://github.com/thanos-io/thanos/pull/8349) Query-Frontend: properly clean up resources
+- [#8338](https://github.com/thanos-io/thanos/pull/8338) Query-Frontend: use original roundtripper + close immediately
+
+## [v0.39.0](https://github.com/thanos-io/thanos/tree/release-0.39) - 2025 06 25
 
 In short: there are a bunch of fixes and small improvements. The shining items in this release are memory usage improvements in Thanos Query and shuffle sharding support in Thanos Receiver. Information about shuffle sharding support is available in the documentation. Thank you to all contributors!
 
@@ -33,7 +64,6 @@ In short: there are a bunch of fixes and small improvements. The shining items i
 
 ### Fixed
 - [#8199](https://github.com/thanos-io/thanos/pull/8199) Query: handle panics or nil pointer dereference in querier gracefully when query analyze returns nil
-
 - [#8211](https://github.com/thanos-io/thanos/pull/8211) Query: fix panic on nested partial response in distributed instant query
 - [#8216](https://github.com/thanos-io/thanos/pull/8216) Query/Receive: fix iter race between `next()` and `stop()` introduced in https://github.com/thanos-io/thanos/pull/7821.
 - [#8212](https://github.com/thanos-io/thanos/pull/8212) Receive: Ensure forward/replication metrics are incremented in err cases
```
```diff
@@ -5,7 +5,7 @@ WORKDIR $GOPATH/src/github.com/thanos-io/thanos
 
 COPY . $GOPATH/src/github.com/thanos-io/thanos
 
-RUN CGO_ENABLED=1 go build -o $GOBIN/thanos -race ./cmd/thanos
+RUN CGO_ENABLED=1 go build -tags slicelabels -o $GOBIN/thanos -race ./cmd/thanos
 # -----------------------------------------------------------------------------
 
 FROM golang:1.24.0
```
Makefile (8 lines changed)

```diff
@@ -319,7 +319,7 @@ test: export THANOS_TEST_ALERTMANAGER_PATH= $(ALERTMANAGER)
 test: check-git install-tool-deps
 	@echo ">> install thanos GOOPTS=${GOOPTS}"
 	@echo ">> running unit tests (without /test/e2e). Do export THANOS_TEST_OBJSTORE_SKIP=GCS,S3,AZURE,SWIFT,COS,ALIYUNOSS,BOS,OCI,OBS if you want to skip e2e tests against all real store buckets. Current value: ${THANOS_TEST_OBJSTORE_SKIP}"
-	@go test -race -timeout 15m $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e);
+	@go test -tags slicelabels -race -timeout 15m $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e);
 
 .PHONY: test-local
 test-local: ## Runs test excluding tests for ALL object storage integrations.
@@ -341,9 +341,9 @@ test-e2e: docker-e2e $(GOTESPLIT)
 # * If you want to limit CPU time available in e2e tests then pass E2E_DOCKER_CPUS environment variable. For example, E2E_DOCKER_CPUS=0.05 limits CPU time available
 # to spawned Docker containers to 0.05 cores.
 	@if [ -n "$(SINGLE_E2E_TEST)" ]; then \
-		$(GOTESPLIT) -total ${GH_PARALLEL} -index ${GH_INDEX} ./test/e2e -- -run $(SINGLE_E2E_TEST) ${GOTEST_OPTS}; \
+		$(GOTESPLIT) -total ${GH_PARALLEL} -index ${GH_INDEX} ./test/e2e -- -tags slicelabels -run $(SINGLE_E2E_TEST) ${GOTEST_OPTS}; \
 	else \
-		$(GOTESPLIT) -total ${GH_PARALLEL} -index ${GH_INDEX} ./test/e2e/... -- ${GOTEST_OPTS}; \
+		$(GOTESPLIT) -total ${GH_PARALLEL} -index ${GH_INDEX} ./test/e2e/... -- -tags slicelabels ${GOTEST_OPTS}; \
 	fi
 
 .PHONY: test-e2e-local
@@ -418,7 +418,7 @@ github.com/prometheus/prometheus/promql/parser.{ParseExpr,ParseMetricSelector}=g
 		io/ioutil.{Discard,NopCloser,ReadAll,ReadDir,ReadFile,TempDir,TempFile,Writefile}" $(shell go list ./... | grep -v "internal/cortex")
 	@$(FAILLINT) -paths "fmt.{Print,Println,Sprint}" -ignore-tests ./...
 	@echo ">> linting all of the Go files GOGC=${GOGC}"
-	@$(GOLANGCI_LINT) run
+	@$(GOLANGCI_LINT) run --build-tags=slicelabels
 	@echo ">> ensuring Copyright headers"
 	@go run ./scripts/copyright
 	@echo ">> ensuring generated proto files are up to date"
```
```diff
@@ -42,9 +42,10 @@ type fileContent interface {
 }
 
 type endpointSettings struct {
 	Strict  bool   `yaml:"strict"`
 	Group   bool   `yaml:"group"`
 	Address string `yaml:"address"`
+	ServiceConfig string `yaml:"service_config"`
 }
 
 type EndpointConfig struct {
@@ -115,6 +116,9 @@ func validateEndpointConfig(cfg EndpointConfig) error {
 		if dns.IsDynamicNode(ecfg.Address) && ecfg.Strict {
 			return errors.Newf("%s is a dynamically specified endpoint i.e. it uses SD and that is not permitted under strict mode.", ecfg.Address)
 		}
+		if !ecfg.Group && len(ecfg.ServiceConfig) != 0 {
+			return errors.Newf("%s service_config is only valid for endpoint groups.", ecfg.Address)
+		}
 	}
 	return nil
 }
@@ -321,7 +325,7 @@ func setupEndpointSet(
 	for _, ecfg := range endpointConfig.Endpoints {
 		strict, group, addr := ecfg.Strict, ecfg.Group, ecfg.Address
 		if group {
-			specs = append(specs, query.NewGRPCEndpointSpec(fmt.Sprintf("thanos:///%s", addr), strict, append(dialOpts, extgrpc.EndpointGroupGRPCOpts()...)...))
+			specs = append(specs, query.NewGRPCEndpointSpec(fmt.Sprintf("thanos:///%s", addr), strict, append(dialOpts, extgrpc.EndpointGroupGRPCOpts(ecfg.ServiceConfig)...)...))
 		} else if !dns.IsDynamicNode(addr) {
 			specs = append(specs, query.NewGRPCEndpointSpec(addr, strict, dialOpts...))
 		}
```
```diff
@@ -33,6 +33,11 @@ type erroringBucket struct {
 	bkt objstore.InstrumentedBucket
 }
 
+// Provider returns the provider of the bucket.
+func (b *erroringBucket) Provider() objstore.ObjProvider {
+	return b.bkt.Provider()
+}
+
 func (b *erroringBucket) Close() error {
 	return b.bkt.Close()
 }
@@ -91,8 +96,8 @@ func (b *erroringBucket) Attributes(ctx context.Context, name string) (objstore.
 
 // Upload the contents of the reader as an object into the bucket.
 // Upload should be idempotent.
-func (b *erroringBucket) Upload(ctx context.Context, name string, r io.Reader) error {
-	return b.bkt.Upload(ctx, name, r)
+func (b *erroringBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
+	return b.bkt.Upload(ctx, name, r, opts...)
 }
 
 // Delete removes the object with the given name.
@@ -134,9 +139,9 @@ func TestRegression4960_Deadlock(t *testing.T) {
 		id, err = e2eutil.CreateBlock(
 			ctx,
 			dir,
-			[]labels.Labels{{{Name: "a", Value: "1"}}},
+			[]labels.Labels{labels.FromStrings("a", "1")},
 			1, 0, downsample.ResLevel1DownsampleRange+1, // Pass the minimum ResLevel1DownsampleRange check.
-			labels.Labels{{Name: "e1", Value: "1"}},
+			labels.FromStrings("e1", "1"),
 			downsample.ResLevel0, metadata.NoneFunc, nil)
 		testutil.Ok(t, err)
 		testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc))
@@ -145,9 +150,9 @@ func TestRegression4960_Deadlock(t *testing.T) {
 		id2, err = e2eutil.CreateBlock(
 			ctx,
 			dir,
-			[]labels.Labels{{{Name: "a", Value: "2"}}},
+			[]labels.Labels{labels.FromStrings("a", "2")},
 			1, 0, downsample.ResLevel1DownsampleRange+1, // Pass the minimum ResLevel1DownsampleRange check.
-			labels.Labels{{Name: "e1", Value: "2"}},
+			labels.FromStrings("e1", "2"),
 			downsample.ResLevel0, metadata.NoneFunc, nil)
 		testutil.Ok(t, err)
 		testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id2.String()), metadata.NoneFunc))
@@ -156,9 +161,9 @@ func TestRegression4960_Deadlock(t *testing.T) {
 		id3, err = e2eutil.CreateBlock(
 			ctx,
 			dir,
-			[]labels.Labels{{{Name: "a", Value: "2"}}},
+			[]labels.Labels{labels.FromStrings("a", "2")},
 			1, 0, downsample.ResLevel1DownsampleRange+1, // Pass the minimum ResLevel1DownsampleRange check.
-			labels.Labels{{Name: "e1", Value: "2"}},
+			labels.FromStrings("e1", "2"),
 			downsample.ResLevel0, metadata.NoneFunc, nil)
 		testutil.Ok(t, err)
 		testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id3.String()), metadata.NoneFunc))
@@ -196,9 +201,9 @@ func TestCleanupDownsampleCacheFolder(t *testing.T) {
 		id, err = e2eutil.CreateBlock(
 			ctx,
 			dir,
-			[]labels.Labels{{{Name: "a", Value: "1"}}},
+			[]labels.Labels{labels.FromStrings("a", "1")},
 			1, 0, downsample.ResLevel1DownsampleRange+1, // Pass the minimum ResLevel1DownsampleRange check.
-			labels.Labels{{Name: "e1", Value: "1"}},
+			labels.FromStrings("e1", "1"),
 			downsample.ResLevel0, metadata.NoneFunc, nil)
 		testutil.Ok(t, err)
 		testutil.Ok(t, block.Upload(ctx, logger, bkt, path.Join(dir, id.String()), metadata.NoneFunc))
```
```diff
@@ -553,6 +553,7 @@ func runQuery(
 		tenantCertField,
 		enforceTenancy,
 		tenantLabel,
+		tsdbSelector,
 	)
 
 	api.Register(router.WithPrefix("/api/v1"), tracer, logger, ins, logMiddleware)
```
```diff
@@ -329,13 +329,13 @@ func runQueryFrontend(
 		return err
 	}
 
-	roundTripper, err := cortexfrontend.NewDownstreamRoundTripper(cfg.DownstreamURL, downstreamTripper)
+	downstreamRT, err := cortexfrontend.NewDownstreamRoundTripper(cfg.DownstreamURL, downstreamTripper)
 	if err != nil {
 		return errors.Wrap(err, "setup downstream roundtripper")
 	}
 
 	// Wrap the downstream RoundTripper into query frontend Tripperware.
-	roundTripper = tripperWare(roundTripper)
+	roundTripper := tripperWare(downstreamRT)
 
 	// Create the query frontend transport.
 	handler := transport.NewHandler(*cfg.CortexHandlerConfig, roundTripper, logger, nil)
@@ -402,17 +402,9 @@ func runQueryFrontend(
 	ctx, cancel := context.WithCancel(context.Background())
 
 	g.Add(func() error {
 		var firstRun = true
-		for {
-			if !firstRun {
-				select {
-				case <-ctx.Done():
-					return nil
-				case <-time.After(10 * time.Second):
-				}
-			}
 
+		doCheckDownstream := func() (rerr error) {
 			timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
 			defer cancel()
 
@@ -422,23 +414,33 @@ func runQueryFrontend(
 				return errors.Wrap(err, "creating request to downstream URL")
 			}
 
-			resp, err := roundTripper.RoundTrip(req)
+			resp, err := downstreamRT.RoundTrip(req)
 			if err != nil {
-				level.Warn(logger).Log("msg", "failed to reach downstream URL", "err", err, "readiness_url", readinessUrl)
-				statusProber.NotReady(err)
-				firstRun = false
-				continue
+				return errors.Wrapf(err, "roundtripping to downstream URL %s", readinessUrl)
 			}
-			runutil.ExhaustCloseWithLogOnErr(logger, resp.Body, "downstream health check response body")
+			defer runutil.CloseWithErrCapture(&rerr, resp.Body, "downstream health check response body")
 
 			if resp.StatusCode/100 == 4 || resp.StatusCode/100 == 5 {
-				level.Warn(logger).Log("msg", "downstream URL returned an error", "status_code", resp.StatusCode, "readiness_url", readinessUrl)
-				statusProber.NotReady(errors.Errorf("downstream URL %s returned an error: %d", readinessUrl, resp.StatusCode))
-				firstRun = false
-				continue
+				return errors.Errorf("downstream URL %s returned an error: %d", readinessUrl, resp.StatusCode)
 			}
 
-			statusProber.Ready()
+			return nil
+		}
+
+		for {
+			if !firstRun {
+				select {
+				case <-ctx.Done():
+					return nil
+				case <-time.After(10 * time.Second):
+				}
+			}
+			firstRun = false
+
+			if err := doCheckDownstream(); err != nil {
+				statusProber.NotReady(err)
+			} else {
+				statusProber.Ready()
+			}
 		}
 	}, func(err error) {
 		cancel()
```
```diff
@@ -26,7 +26,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/tsdb"
-	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/compression"
 	"github.com/thanos-io/objstore"
 	"github.com/thanos-io/objstore/client"
 	objstoretracing "github.com/thanos-io/objstore/tracing/opentracing"
@@ -35,6 +35,7 @@ import (
 
 	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/component"
+	"github.com/thanos-io/thanos/pkg/compressutil"
 	"github.com/thanos-io/thanos/pkg/exemplars"
 	"github.com/thanos-io/thanos/pkg/extgrpc"
 	"github.com/thanos-io/thanos/pkg/extgrpc/snappy"
@@ -93,7 +94,7 @@ func registerReceive(app *extkingpin.App) {
 			MaxBytes:                 int64(conf.tsdbMaxBytes),
 			OutOfOrderCapMax:         conf.tsdbOutOfOrderCapMax,
 			NoLockfile:               conf.noLockFile,
-			WALCompression:           wlog.ParseCompressionType(conf.walCompression, string(wlog.CompressionSnappy)),
+			WALCompression:           compressutil.ParseCompressionType(conf.walCompression, compression.Snappy),
 			MaxExemplars:             conf.tsdbMaxExemplars,
 			EnableExemplarStorage:    conf.tsdbMaxExemplars > 0,
 			HeadChunksWriteQueueSize: int(conf.tsdbWriteQueueSize),
```
```diff
@@ -41,7 +41,7 @@ import (
 	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/agent"
-	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/compression"
 	"gopkg.in/yaml.v2"
 
 	"github.com/thanos-io/objstore"
@@ -54,6 +54,7 @@ import (
 	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/clientconfig"
 	"github.com/thanos-io/thanos/pkg/component"
+	"github.com/thanos-io/thanos/pkg/compressutil"
 	"github.com/thanos-io/thanos/pkg/discovery/dns"
 	"github.com/thanos-io/thanos/pkg/errutil"
 	"github.com/thanos-io/thanos/pkg/extannotations"
@@ -112,8 +113,9 @@ type ruleConfig struct {
 	storeRateLimits    store.SeriesSelectLimits
 	ruleConcurrentEval int64
 
 	extendedFunctionsEnabled bool
 	EnableFeatures           []string
+	tsdbEnableNativeHistograms bool
 }
 
 type Expression struct {
@@ -170,6 +172,10 @@ func registerRule(app *extkingpin.App) {
 	cmd.Flag("query.enable-x-functions", "Whether to enable extended rate functions (xrate, xincrease and xdelta). Only has effect when used with Thanos engine.").Default("false").BoolVar(&conf.extendedFunctionsEnabled)
 	cmd.Flag("enable-feature", "Comma separated feature names to enable. Valid options for now: promql-experimental-functions (enables promql experimental functions for ruler)").Default("").StringsVar(&conf.EnableFeatures)
 
+	cmd.Flag("tsdb.enable-native-histograms",
+		"[EXPERIMENTAL] Enables the ingestion of native histograms.").
+		Default("false").BoolVar(&conf.tsdbEnableNativeHistograms)
+
 	conf.rwConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write configurations, that specify servers where samples should be sent to (see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.", extflag.WithEnvSubstitution())
 
 	conf.objStoreConfig = extkingpin.RegisterCommonObjStoreFlags(cmd, "", false)
@@ -189,15 +195,16 @@ func registerRule(app *extkingpin.App) {
 	}
 
 	tsdbOpts := &tsdb.Options{
 		MinBlockDuration:  int64(time.Duration(*tsdbBlockDuration) / time.Millisecond),
 		MaxBlockDuration:  int64(time.Duration(*tsdbBlockDuration) / time.Millisecond),
 		RetentionDuration: int64(time.Duration(*tsdbRetention) / time.Millisecond),
 		NoLockfile:        *noLockFile,
-		WALCompression:    wlog.ParseCompressionType(*walCompression, string(wlog.CompressionSnappy)),
+		WALCompression:    compressutil.ParseCompressionType(*walCompression, compression.Snappy),
+		EnableNativeHistograms: conf.tsdbEnableNativeHistograms,
 	}
 
 	agentOpts := &agent.Options{
-		WALCompression: wlog.ParseCompressionType(*walCompression, string(wlog.CompressionSnappy)),
+		WALCompression: compressutil.ParseCompressionType(*walCompression, compression.Snappy),
 		NoLockfile:     *noLockFile,
 	}
 
```
```diff
@@ -393,14 +393,16 @@ func runStore(
 		return errors.Errorf("unknown sync strategy %s", conf.blockListStrategy)
 	}
 	ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(logger, insBkt, time.Duration(conf.ignoreDeletionMarksDelay), conf.blockMetaFetchConcurrency)
-	metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, blockLister, dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg),
-		[]block.MetadataFilter{
-			block.NewTimePartitionMetaFilter(conf.filterConf.MinTime, conf.filterConf.MaxTime),
-			block.NewLabelShardedMetaFilter(relabelConfig),
-			block.NewConsistencyDelayMetaFilter(logger, time.Duration(conf.consistencyDelay), extprom.WrapRegistererWithPrefix("thanos_", reg)),
-			ignoreDeletionMarkFilter,
-			block.NewDeduplicateFilter(conf.blockMetaFetchConcurrency),
-		})
+	filters := []block.MetadataFilter{
+		block.NewTimePartitionMetaFilter(conf.filterConf.MinTime, conf.filterConf.MaxTime),
+		block.NewLabelShardedMetaFilter(relabelConfig),
+		block.NewConsistencyDelayMetaFilter(logger, time.Duration(conf.consistencyDelay), extprom.WrapRegistererWithPrefix("thanos_", reg)),
+		ignoreDeletionMarkFilter,
+		block.NewDeduplicateFilter(conf.blockMetaFetchConcurrency),
+		block.NewParquetMigratedMetaFilter(logger),
+	}
+
+	metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, blockLister, dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg), filters)
 	if err != nil {
 		return errors.Wrap(err, "meta fetcher")
 	}
```
```diff
@@ -166,8 +166,9 @@ type bucketMarkBlockConfig struct {
 }
 
 type bucketUploadBlocksConfig struct {
 	path   string
 	labels []string
+	uploadCompacted bool
 }
 
 func (tbc *bucketVerifyConfig) registerBucketVerifyFlag(cmd extkingpin.FlagClause) *bucketVerifyConfig {
@@ -300,6 +301,7 @@ func (tbc *bucketRetentionConfig) registerBucketRetentionFlag(cmd extkingpin.Fla
 func (tbc *bucketUploadBlocksConfig) registerBucketUploadBlocksFlag(cmd extkingpin.FlagClause) *bucketUploadBlocksConfig {
 	cmd.Flag("path", "Path to the directory containing blocks to upload.").Default("./data").StringVar(&tbc.path)
 	cmd.Flag("label", "External labels to add to the uploaded blocks (repeated).").PlaceHolder("key=\"value\"").StringsVar(&tbc.labels)
+	cmd.Flag("shipper.upload-compacted", "If true shipper will try to upload compacted blocks as well.").Default("false").BoolVar(&tbc.uploadCompacted)
 
 	return tbc
 }
@@ -1509,6 +1511,7 @@ func registerBucketUploadBlocks(app extkingpin.AppClause, objStoreConfig *extfla
 		shipper.WithSource(metadata.BucketUploadSource),
 		shipper.WithMetaFileName(shipper.DefaultMetaFilename),
 		shipper.WithLabels(func() labels.Labels { return lset }),
+		shipper.WithUploadCompacted(tbc.uploadCompacted),
 	)
 
 	ctx, cancel := context.WithCancel(context.Background())
```
@@ -0,0 +1,245 @@

---
title: Life of a Sample in Thanos and How to Configure It – Data Management – Part II
date: "2024-09-16"
author: Thibault Mangé (https://github.com/thibaultmg)
---

## Life of a Sample in Thanos and How to Configure It – Data Management – Part II

### Introduction

In the first part of this series, we followed the life of a sample from its inception in a Prometheus server to our Thanos Receivers. We will now explore how Thanos manages the data ingested by the Receivers and optimizes it in the object store for reduced cost and fast retrieval.

Let's delve into these topics and more in the second part of the series.

### Preparing Samples for Object Storage: Building Chunks and Blocks

#### Using Object Storage

A key feature of Thanos is its ability to leverage economical object storage solutions like AWS S3 for long-term data retention. This contrasts with Prometheus's typical approach of storing data locally for shorter periods.

The Receive component is responsible for preparing data for object storage. Thanos adopts the TSDB (Time Series Database) data model, with some adaptations, for its object storage. This involves aggregating samples over time to construct TSDB blocks. Please refer to the annexes of the first part if this vocabulary is not clear to you.

These blocks are built by aggregating data over two-hour periods. Once a block is ready, it is sent to the object storage, which is configured using the `--objstore.config` flag. This configuration is uniform across all components requiring object storage access.
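For illustration, a minimal S3 object storage configuration passed via `--objstore.config` (or `--objstore.config-file`) might look like the following sketch; the bucket name and endpoint are placeholders:

```yaml
type: S3
config:
  bucket: "thanos-blocks"                  # placeholder bucket name
  endpoint: "s3.us-east-1.amazonaws.com"   # placeholder region endpoint
  # Credentials may come from the environment (e.g. IAM roles) or be set here explicitly.
```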
On restarts, the Receive component ensures data preservation by immediately flushing existing data to object storage, even if it does not constitute a full two-hour block. These partial blocks are less efficient but are then optimized by the compactor, as we will see later.

The Receive is also able to [isolate data](https://thanos.io/tip/components/receive.md/#tenant-lifecycle-management) coming from different tenants. The tenant can be identified in the request by different means: a header (`--receive.tenant-header`), a label (`--receive.split-tenant-label-name`) or a certificate (`--receive.tenant-certificate-field`), as sketched below. Each tenant's data is ingested into a different TSDB instance (you might hear this referred to as the multiTSDB). The benefits are twofold:

* It allows for parallelization of the block-building process, especially on the compactor side, as we will see later.
* It allows for smaller indexes. Indeed, labels tend to be similar for samples coming from the same source, leading to more effective compression.
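As an illustration, here is how the tenancy flags might be wired on a Receive instance, shown as Kubernetes-style container args (values are placeholders, and typically only one identification method is needed):

```yaml
args:
  - --receive.tenant-header=THANOS-TENANT        # tenant taken from this HTTP header
  - --receive.tenant-label-name=tenant_id        # external label attached to each tenant's blocks
  - --receive.default-tenant-id=default-tenant   # fallback when no tenant is provided
```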
<img src="img/life-of-a-sample/multi-tsdb.png" alt="Multi TSDB" style="max-width: 600px; display: block;margin: 0 auto;"/>

When a block is ready, it is uploaded to the object store with the block external label defined by the flag `--receive.tenant-label-name`. This corresponds to the `thanos.labels` field of the [block metadata](https://thanos.io/tip/thanos/storage.md/#metadata-file-metajson). This will be used by the compactor to group blocks together, as we will see later.

#### Exposing Local Data for Queries

During the block-building phase, the data is not accessible to the Store Gateway as it has not been uploaded to the object store yet. To counter that, the Receive component also serves as a data store, making the local data available for query through the `Store API`. This is a common gRPC API used across all Thanos components for time series data access, set with the `--grpc-address` flag. The Receive will serve all the data it has; the more data it serves, the more resources it will use for this duty in addition to ingesting client data.

<img src="img/life-of-a-sample/receive-store-api.png" alt="Receive Store API" style="max-width: 600px; display: block;margin: 0 auto;"/>

The amount of data the Receive component serves can be managed through two parameters (see the sketch after this list):

* `--tsdb.retention`: Sets the local storage retention duration. The minimum is 2 hours, aligning with block construction periods.
* `--store.limits.request-samples` and `--store.limits.request-series`: These parameters limit the volume of data that can be queried by setting a maximum on the number of samples and/or the number of series. If these limits are exceeded, the query will be denied to ensure system stability.
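A sketch of these two knobs on a Receive instance, again as Kubernetes-style args (the values are illustrative, not recommendations):

```yaml
args:
  - --tsdb.retention=4h                       # keep local data just long enough to overlap with the Store Gateway
  - --store.limits.request-samples=100000000  # deny queries selecting more samples than this
  - --store.limits.request-series=1000000     # deny queries selecting more series than this
```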
Key points to consider:

* The primary objective of the Receive component is to ensure **reliable data ingestion**. However, the more data it serves through the Store API, the more resources it will use for this duty in addition to ingesting client data. You should set the retention duration to the minimum required for your use case to optimize resource allocation. The minimum value for 2-hour blocks would be a 4-hour retention to account for availability in the Store Gateway after the block is uploaded to object storage. To prevent data loss, if the Receive component fails to upload blocks before the retention limit is reached, it will hold them until the upload succeeds.
* Even when the retention duration is short, your Receive instance could be overwhelmed by a query selecting too much data. You should put limits in place to ensure the stability of the Receive instances. These limits must be carefully set to enable Store API clients to retrieve the data they need while preventing resource exhaustion. The longer the retention, the higher the limits should be, as the number of samples and series will increase.

### Maintaining Data: Compaction, Downsampling, and Retention

#### The Need for Compaction

The Receive component implements many strategies to ingest samples reliably. However, this can result in unoptimized data in object storage. This is due to:

* Inefficient partial blocks sent to object storage on shutdowns.
* Duplicated data when replication is set. Several Receive instances will send the same data to object storage.
* Incomplete blocks (invalid blocks) sent to object storage when the Receive fails in the middle of an upload.

The following diagram illustrates the impact on data expansion in object storage when samples from a given target are ingested from a high-availability Prometheus setup (with 2 instances) and replication is set on the Receive (factor 3):

<img src="img/life-of-a-sample/data-expansion.png" alt="Data expansion" style="max-width: 600px; display: block;margin: 0 auto;"/>

This leads to a threefold increase in label volume (one for each block) and a sixfold increase in sample volume! This is where the Compactor comes into play.

The Compactor component is responsible for maintaining and optimizing data in object storage. It is a long-running process when configured to wait for new blocks with the `--wait` flag. It also needs access to the object storage using the `--objstore.config` flag.

Under normal operating conditions, the Compactor will check for new blocks every 5 minutes. By default, it will only consider blocks that are older than 30 minutes (configured with the `--consistency-delay` flag) to avoid reading partially uploaded blocks. It will then process these blocks in a structured manner, compacting them according to defined settings that we will discuss in the next sections.
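Putting the flags mentioned so far together, a basic Compactor deployment might look like this sketch (Kubernetes-style args; the config file path is a placeholder):

```yaml
args:
  - --wait                                             # run continuously, watching for new blocks
  - --objstore.config-file=/etc/thanos/objstore.yaml   # same object storage config as the other components
  - --consistency-delay=30m                            # skip blocks uploaded less than 30 minutes ago
```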
#### Compaction Modes

Compaction consists of merging blocks that have overlapping or adjacent time ranges. This is called **horizontal compaction**. Using the [metadata file](https://thanos.io/tip/thanos/storage.md/#metadata-file-metajson), which contains the minimum and maximum timestamps of samples in the block, the Compactor can determine whether two blocks overlap. If they do, they are merged into a new block. This new block will have its compaction level index increased by one. So from two adjacent blocks of 2 hours each, we get a new block of 4 hours.

During this compaction, the Compactor will also deduplicate samples. This is called [**vertical compaction**](https://thanos.io/tip/components/compact.md/#vertical-compactions). The Compactor provides two deduplication modes:

* `one-to-one`: This is the default mode. It deduplicates samples that have the same timestamp and the same value but different replica label values. The replica label is configured by the `--deduplication.replica-label` flag, which can be repeated to account for several replication labels and is usually set to `replica`. Make sure it is set up as an external label on the Receivers with the flag `--label=replica=xxx`. The benefit of this mode is that it is straightforward and removes the data replicated by the Receive. However, it cannot remove data replicated by high-availability Prometheus setups, because those samples will rarely be scraped at exactly the same timestamps, as demonstrated by the diagram below.
* `penalty`: This is a more complex deduplication algorithm that is able to deduplicate data coming from high-availability Prometheus setups. It can be set with the `--deduplication.func` flag and also requires setting the `--deduplication.replica-label` flag to the label that identifies replicas, usually `prometheus_replica`.
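As a sketch, enabling penalty-mode deduplication on the Compactor could look like this (the label name must match the external label your Prometheus HA setup actually uses):

```yaml
args:
  - --deduplication.func=penalty
  - --deduplication.replica-label=prometheus_replica   # the HA replica label to collapse
```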
Here is a diagram illustrating how Prometheus replicas generate samples with different timestamps that cannot be deduplicated with the `one-to-one` mode:

<img src="img/life-of-a-sample/ha-prometheus-duplicates.png" alt="High availability prometheus duplication" style="max-width: 600px; display: block;margin: 0 auto;"/>

Getting back to our example illustrating the data duplication happening in the object storage, here is how each compaction process will impact the data:

<img src="img/life-of-a-sample/compactor-compaction.png" alt="Compactor compaction" width="700"/>

First, horizontal compaction will merge blocks together. This will mostly have an effect on the labels data that are stored in a compressed format in a single index binary file attached to a single block. Then, one-to-one deduplication will remove identical samples and delete the related replica label. Finally, penalty deduplication will remove duplicated samples resulting from concurrent scrapes in high-availability Prometheus setups and remove the related replica label.

You want to deduplicate data as much as possible because it will lower your object storage cost and improve query performance. However, using the penalty mode presents some limitations. For more details, see [the documentation](https://thanos.io/tip/components/compact.md/#vertical-compaction-risks).

Key points to consider:

* You want blocks that are not too big because they will be slow to query. However, you also want to limit the number of blocks because having too many will increase the number of requests to the object storage. Also, the more blocks there are, the less compaction occurs, and the more data there is to store and load into memory.
* You do not need to worry about blocks that are too small, as the Compactor will merge them together. However, you could end up with blocks that are too big. This can happen if you have very high cardinality workloads or churn-heavy workloads like CI runs, build pipelines, serverless functions, or batch jobs, which often lead to huge cardinality explosions as the metrics labels change often.
* The main solution to this is splitting the data into several block streams, as we will see later. This is Thanos's sharding strategy.
* There are also cases where you might want to limit the size of the blocks. To that effect, you can use the following parameters (see the sketch after this list):
  * You can limit the compaction levels with `--debug.max-compaction-level` to prevent the Compactor from creating blocks that are too big. This is especially useful when you have a high metrics churn rate. Level 1 is the default and will create blocks of 2 hours. Level 2 will create blocks of 8 hours, level 3 of 2 days, and up to level 4 of 14 days. Without this limit, the Compactor will create blocks of up to 2 weeks. This is not a magic bullet; it does not limit the data size of the blocks. It just limits the number of blocks that can be merged together. The downside of using this setting is that it will increase the number of blocks in the object storage. They will use more space, and query performance might be impacted.
  * The flag `compact.block-max-index-size` can be used more effectively to specify the maximum index size beyond which the Compactor will stop block compaction, independently of its compaction level. Once a block's index exceeds this size, the system marks it for no further compaction. The default value is 64 GB, which is the maximum index size the TSDB supports. As a result, some block streams might appear discontinuous in the UI, displaying a lower compaction level than the surrounding blocks.
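A sketch of those two limits (the values are illustrative, and `--debug.max-compaction-level` is, as its name suggests, a debug-grade flag):

```yaml
args:
  - --debug.max-compaction-level=3        # stop merging beyond 2-day blocks
  - --compact.block-max-index-size=32GB   # stop compacting a stream once its index grows this large
```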
#### Scaling the Compactor: Block Streams

Not all blocks covering the same time range are compacted together. Instead, the Compactor organizes them into distinct [compaction groups or block streams](https://thanos.io/tip/components/compact.md/#compaction-groups--block-streams). The key here is to leverage external labels to group data originating from the same source. This strategic grouping is particularly effective for compacting indexes, as blocks from the same source tend to have nearly identical labels.

You can improve the performance of the Compactor by:

* Increasing the number of concurrent compactions using the `--compact.concurrency` flag. Bear in mind that you must scale storage, memory and CPU resources accordingly (linearly).
* Sharding the data. In this mode, each Compactor will process a disjoint set of block streams. This is done by setting up the `--selector.relabel-config` flag on the external labels. For example:

```yaml
- action: hashmod
  source_labels:
    - tenant_id # An external label that identifies some block streams
  target_label: shard
  modulus: 2 # The number of Compactor replicas
- action: keep
  source_labels:
    - shard
  regex: 0 # The shard number assigned to this Compactor
```

In this configuration, the `hashmod` action is used to distribute blocks across multiple Compactor instances based on the `tenant_id` label. The `modulus` should match the number of Compactor replicas you have. Each replica will then only process the blocks that match its shard number, as defined by the `regex` in the `keep` action.

#### Downsampling and Retention

The Compactor also optimizes data reads for long-range queries. If you are querying data for several months, you do not need the typical 15-second raw resolution. Processing such a query will be very inefficient, as it will retrieve a lot of unnecessary data that you will not be able to visualize in such detail in your UI. In worst-case scenarios, it may even cause some components of your Thanos setup to fail due to memory exhaustion.

To enable performant long-range queries, the Compactor can downsample data, with retention for each resolution controlled by the `--retention.resolution-*` flags. It supports two downsampling levels: 5 minutes and 1 hour. These are the resolutions of the downsampled series. They will typically come on top of the raw data, so that you can have both raw and downsampled data. This will enable you to spot abnormal patterns over long-range queries and then zoom into specific parts using the raw data. We will discuss how to configure the query to use the downsampled data in the next article.
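For example, per-resolution retention might be configured like this (durations are illustrative; `0d` means unlimited retention):

```yaml
args:
  - --retention.resolution-raw=30d   # keep raw data for 30 days
  - --retention.resolution-5m=120d   # keep 5m-downsampled data for 120 days
  - --retention.resolution-1h=0d     # keep 1h-downsampled data forever
```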
When the Compactor performs downsampling, it does more than simply reduce the number of data points by removing intermediate samples. While reducing the volume of data is a primary goal, especially to improve performance for long-range queries, the Compactor ensures that essential statistical properties of the original data are preserved. This is crucial for maintaining the accuracy and integrity of any aggregations or analyses performed on the downsampled data. In addition to the downsampled data, it stores the count, minimum, maximum, and sum of the downsampled window. Functions like sum(), min(), max(), and avg() can then be computed correctly over the downsampled data because the necessary statistical information is preserved.

This downsampled data is then stored in its own block, one per downsampling level for each corresponding raw block.
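A tiny worked example of the idea, with made-up values: if one downsampled window covers the raw samples 2, 4, 6 and 8, the stored aggregates would be:

```yaml
# Aggregates stored for one downsampled window (illustrative values):
count: 4
sum: 20
min: 2
max: 8
# avg() over the window can then be derived exactly as sum / count = 5,
# without needing the original samples.
```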
|
||||||
|
|
||||||
|
Key points to consider:
|
||||||
|
|
||||||
|
* Downsampling is not for reducing the volume of data in object storage. It is for improving long-range query performance, making your system more versatile and stable.
|
||||||
|
* Thanos recommends having the same retention duration for raw and downsampled data. This will enable you to have a consistent view of your data over time.
|
||||||
|
* As a rule of thumb, you can consider that each downsampled data level increases the storage need by onefold compared to the raw data, although it is often a bit less than that.
|
||||||
|
|
||||||
|
#### The Compactor UI and the Block Streams
|
||||||
|
|
||||||
|
The Compactor's functionality and the progress of its operations can be monitored through the **Block Viewer UI**. This web-based interface is accessible if the Compactor is configured with the `--http-address` flag. Additional UI settings are controlled via `--web.*` and `--block-viewer.*` flags. The Compactor UI provides a visual representation of the compaction process, showing how blocks are grouped and compacted over time. Here is a glimpse of what the UI looks like:
|
||||||
|
|
||||||
|
<img src="img/life-of-a-sample/compactor-ui.png" alt="Receive and Store data overlap" width="800"/>
|
||||||
|
|
||||||
|
Occasionally, some blocks may display an artificially high compaction level in the UI, appearing lower in the stream compared to adjacent blocks. This scenario often occurs in situations like rolling Receiver upgrades, where Receivers restart sequentially, leading to the creation and upload of partial blocks to the object store. The Compactor then vertically compacts these blocks as they arrive, resulting in a temporary increase in compaction levels. When these blocks are horizontally compacted with adjacent blocks, they will be displayed higher up in the stream.
|
||||||
|
|
||||||
|
As explained earlier with compaction levels, by default, the Compactor’s strategy involves compacting 2-hour blocks into 8-hour blocks once they are available, then progressing to 2-day blocks, and up to 14 days, following a structured compaction timeline.
|
||||||
|
|
||||||
|
### Exposing Bucket Data for Queries: The Store Gateway and the Store API
|
||||||
|
|
||||||
|
#### Exposing Data for Queries
|
||||||
|
|
||||||
|
The Store Gateway acts as a facade for the object storage, making bucket data accessible via the Thanos Store API, a feature first introduced with the Receive component. The Store Gateway exposes the Store API with the `--grpc-address` flag.
|
||||||
|
|
||||||
|
The Store Gateway requires access to the object storage bucket to retrieve data, which is configured with the `--objstore.config` flag. You can use the `--max-time` flag to specify which blocks should be considered by the Store Gateway. For example, if your Receive instances are serving data up to 10 hours, you may configure `--max-time=-8h` so that it does not consider blocks more recent than 8 hours. This avoids returning the same data as the Receivers while ensuring some overlap between the two.
|
||||||
|
|
||||||
|
To function optimally, the Store Gateway relies on caches. To understand their usefulness, let's first explore how the Store Gateway retrieves data from the blocks in the object storage.
|
||||||
|
|
||||||
|
#### Retrieving Samples from the Object Store
|
||||||
|
|
||||||
|
Consider the simple following query done on the Querier:
|
||||||
|
|
||||||
|
```promql
|
||||||
|
# Between now and 2 days ago, compute the rate of http requests per second, filtered by method and status
|
||||||
|
rate(http_requests_total{method="GET", status="200"}[5m])
|
||||||
|
```
|
||||||
|
|
||||||
|
This PromQL query will be parsed by the Querier, which will emit a Thanos [Store API](https://github.com/thanos-io/thanos/blob/main/pkg/store/storepb/rpc.proto) request to the Store Gateway with the following parameters:
|
||||||
|
|
||||||
|
```proto
|
||||||
|
SeriesRequest request = {
|
||||||
|
min_time: [Timestamp 2 days ago],
|
||||||
|
max_time: [Current Timestamp],
|
||||||
|
max_resolution_window: 1h, // the minimum time range between two samples, relates to the downsampling levels
|
||||||
|
matchers: [
|
||||||
|
{ name: "__name__", value: "http_requests_total", type: EQUAL },
|
||||||
|
{ name: "method", value: "GET", type: EQUAL },
|
||||||
|
{ name: "status", value: "200", type: EQUAL }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The Store Gateway processes this request in several steps:
|
||||||
|
|
||||||
|
* **Metadata processing**: The Store Gateway first examines the block [metadata](https://thanos.io/tip/thanos/storage.md/#metadata-file-metajson) to determine the relevance of each block to the query. It evaluates the time range (`minTime` and `maxTime`) and external labels (`thanos.labels`). Blocks are deemed relevant if their timestamps overlap with the query's time range and if their resolution (`thanos.downsample.resolution`) matches the query's maximum allowed resolution.
* **Index processing**: Next, the Store Gateway retrieves the [indexes](https://thanos.io/tip/thanos/storage.md/#index-format-index) of candidate blocks. This involves:
  * Fetching postings lists for each label specified in the query. These are inverted indexes where each label and value has an associated sorted list of all the corresponding time series IDs. Example:
    * `"__name__=http_requests_total": [1, 2, 3]`
    * `"method=GET": [1, 2, 6]`
    * `"status=200": [1, 2, 5]`
  * Intersecting these postings lists to select the series matching all query labels. In our example, these are series 1 and 2.
  * Retrieving the series section from the index for these series, which includes the chunk files, the time ranges, and the offset position in the file. Example:
    * Series 1: [Chunk 1: mint=t0, maxt=t1, fileRef=0001, offset=0], ...
  * Determining the relevant chunks based on their time range intersection with the query.
* **Chunks retrieval**: The Store Gateway then fetches the appropriate chunks, either from the object storage directly or from a chunk cache. When retrieving from the object store, the Gateway leverages its API to read only the needed bytes (e.g., using S3 range requests), bypassing the need to download entire chunk files.
Then, the Gateway streams the selected chunks to the requesting Querier.
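To make the index step concrete, here is a minimal, self-contained Go sketch of the sorted-postings intersection described above (the `intersect` helper and the literal ID slices are illustrative, not the actual Thanos implementation):

```go
package main

import "fmt"

// intersect returns the series IDs present in both sorted postings lists.
// Real postings are iterators over a block's inverted index; plain slices
// keep the sketch self-contained.
func intersect(a, b []uint64) []uint64 {
	var out []uint64
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	name := []uint64{1, 2, 3}   // "__name__=http_requests_total"
	method := []uint64{1, 2, 6} // "method=GET"
	status := []uint64{1, 2, 5} // "status=200"

	// Intersecting pairwise yields the series matching all three labels.
	fmt.Println(intersect(intersect(name, method), status)) // [1 2]
}
```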
#### Optimizing the Store Gateway
Understanding the retrieval algorithm highlights the critical role of an external [index cache](https://thanos.io/tip/components/store.md/#index-cache) in the Store Gateway's operation. This is configured using the `--index-cache.config` flag. Indexes contain all labels and values of the block, which can result in large sizes. When the cache is full, Least Recently Used (LRU) eviction is applied. In scenarios where no external cache is configured, a portion of the memory will be utilized as a cache, managed via the `--index-cache.size` flag.
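As a sketch, an external index cache backed by memcached can be configured along these lines (the address is a placeholder; consult the index cache documentation for the full set of options):

```yaml
type: MEMCACHED
config:
  addresses: ["memcached.monitoring.svc.cluster.local:11211"]
```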
Moreover, the direct retrieval of chunks from object storage can be suboptimal, and result in excessive costs, especially with a high volume of queries. To mitigate this, employing a [caching bucket](https://thanos.io/tip/components/store.md/#caching-bucket) can significantly reduce the number of queries to the object storage. This is configured using the `--store.caching-bucket.config` flag. This chunk caching strategy is particularly effective when data access patterns are predominantly focused on recent data. By caching these frequently accessed chunks, query performance is enhanced, and the load on object storage is reduced.
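A minimal caching bucket configuration might look like the following sketch (the address is a placeholder and the tuning fields are illustrative, not recommendations; refer to the caching bucket documentation for the authoritative schema):

```yaml
type: MEMCACHED
config:
  addresses: ["memcached.monitoring.svc.cluster.local:11211"]
chunk_subrange_size: 16000
chunk_subrange_ttl: 24h
```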
Finally, you can implement the same safeguards as the Receive component by setting limits on the number of samples and series that can be queried. This is accomplished using the same `--store.limits.request-samples` and `--store.limits.request-series` flags.
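For example, with illustrative values that should be sized to your own workload:

```bash
thanos store \
  --store.limits.request-samples=100000000 \
  --store.limits.request-series=1000000
```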
#### Scaling the Store Gateway
The performance of Thanos Store components can be notably improved by managing concurrency and implementing sharding strategies.
Adjusting the level of concurrency can have a significant impact on performance. This is managed through the `--store.grpc.series-max-concurrency` flag, which sets the number of allowed concurrent series requests on the Store API. Other lower-level concurrency settings are also available.
After optimizing the store processing, you can distribute the query load using sharding strategies similar to what was done with the Compactor. Using a relabel configuration, you can assign a disjoint set of blocks to each Store Gateway replica. Here’s an example of how to set up sharding using the `--selector.relabel-config` flag:
```yaml
- action: hashmod
  source_labels:
    - tenant_id # An external label that identifies some block streams
  target_label: shard
  modulus: 2 # The number of Store Gateway replicas
- action: keep
  source_labels:
    - shard
  regex: 0 # The shard number assigned to this Store Gateway
```
Sharding based on the `__block_id` is not recommended because it prevents Stores from selecting the most relevant data resolution needed for a query. For example, one store might see only the raw data and return it, while another store sees the downsampled version for the same query and also returns it. This duplication creates unnecessary overhead.
External-label-based sharding avoids this issue. By giving a store a complete view of a stream's data (both raw and downsampled), it can effectively select the most appropriate resolution.
If external label sharding is not sufficient, you can combine it with time partitioning using the `--min-time` and `--max-time` flags. This process is done at the chunk level, meaning you can use shorter time intervals for recent data in 2 hour blocks, but you must use longer intervals for older data to account for horizontal compaction. The goal is for any store instance to have a complete view of the stream's data at every resolution for a given time slot, allowing it to return the unique and most appropriate data.
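As a sketch, two Store Gateway replicas could divide the timeline as follows (the boundaries are illustrative; note the deliberate overlap at the split point so no time slot is left uncovered):

```bash
# Replica 1: recent blocks only
thanos store --min-time=-2w --max-time=-8h --objstore.config-file=bucket.yaml

# Replica 2: older blocks
thanos store --min-time=-1y --max-time=-2w --objstore.config-file=bucket.yaml
```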
### Conclusion
In this second part, we explored how Thanos manages data for efficient storage and retrieval. We examined how the Receive component prepares samples and exposes local data for queries, and how the Compactor optimizes data through compaction and downsampling. We also discussed how the Store Gateway retrieves data and can be optimized by leveraging indexes and implementing sharding strategies.
Now that our samples are efficiently stored and prepared for queries, we can move on to the final part of this series, where we will explore how this distributed data is retrieved by query components like the Querier.
See the full list of articles in this series:
* Life of a sample in Thanos, and how to configure it – Ingestion – Part I
* Life of a sample in Thanos, and how to configure it – Data Management – Part II
* Life of a sample in Thanos, and how to configure it – Querying – Part III
@@ -114,13 +114,13 @@ Note that deduplication of HA groups is not supported by the `chain` algorithm.

 ## Thanos PromQL Engine (experimental)

-By default, Thanos querier comes with standard Prometheus PromQL engine. However, when `--query.promql-engine=thanos` is specified, Thanos will use [experimental Thanos PromQL engine](http://github.com/thanos-community/promql-engine) which is a drop-in, efficient implementation of PromQL engine with query planner and optimizers.
+By default, Thanos querier comes with standard Prometheus PromQL engine. However, when `--query.promql-engine=thanos` is specified, Thanos will use [experimental Thanos PromQL engine](http://github.com/thanos-io/promql-engine) which is a drop-in, efficient implementation of PromQL engine with query planner and optimizers.

 To learn more, see [the introduction talk](https://youtu.be/pjkWzDVxWk4?t=3609) from [the PromConEU 2022](https://promcon.io/2022-munich/talks/opening-pandoras-box-redesigning/).

 This feature is still **experimental** given active development. All queries should be supported due to built-in fallback to old PromQL if something is not yet implemented.

-For new engine bugs/issues, please use https://github.com/thanos-community/promql-engine GitHub issues.
+For new engine bugs/issues, please use https://github.com/thanos-io/promql-engine GitHub issues.

 ### Distributed execution mode
@@ -503,6 +503,9 @@ Flags:
                                  options for now: promql-experimental-functions
                                  (enables promql experimental functions for
                                  ruler)
+      --[no-]tsdb.enable-native-histograms
+                                 [EXPERIMENTAL] Enables the ingestion of native
+                                 histograms.
       --remote-write.config-file=<file-path>
                                  Path to YAML config for the remote-write
                                  configurations, that specify servers
@@ -1051,6 +1051,9 @@ Flags:
                                  upload.
       --label=key="value" ...    External labels to add to the uploaded blocks
                                  (repeated).
+      --[no-]shipper.upload-compacted
+                                 If true shipper will try to upload compacted
+                                 blocks as well.
 ```
@@ -11,7 +11,7 @@ menu: proposals-accepted
 * https://github.com/thanos-io/thanos/pull/5250
 * https://github.com/thanos-io/thanos/pull/4917
 * https://github.com/thanos-io/thanos/pull/5350
-* https://github.com/thanos-community/promql-engine/issues/25
+* https://github.com/thanos-io/promql-engine/issues/25

 ## 2 Why
@@ -75,7 +75,7 @@ Keeping PromQL execution in Query components allows for deduplication between Pr

 <img src="../img/distributed-execution-proposal-1.png" alt="Distributed query execution" width="400"/>

-The initial version of the solution can be found here: https://github.com/thanos-community/promql-engine/pull/139
+The initial version of the solution can be found here: https://github.com/thanos-io/promql-engine/pull/139

 ### Query rewrite algorithm
@@ -92,7 +92,7 @@ Enforcing tenancy label in queries:

 #### Apply verification and enforcement logic in the Query Frontend instead of Querier.

-The Query Frontend is an optional component on any Thanos deployment, while the Querier is always present. Plus, there might be deployments with multiple Querier layers where one or more might need to apply tenant verification and enforcement. On top of this, doing it in the Querier supports future work on using the [new Thanos PromQL engine](https://github.com/thanos-community/promql-engine), which can potentially make the Query Frontend unnecessary.
+The Query Frontend is an optional component on any Thanos deployment, while the Querier is always present. Plus, there might be deployments with multiple Querier layers where one or more might need to apply tenant verification and enforcement. On top of this, doing it in the Querier supports future work on using the [new Thanos PromQL engine](https://github.com/thanos-io/promql-engine), which can potentially make the Query Frontend unnecessary.

 #### Add the tenant identification as an optional field in the Store API protobuffer spec instead of an HTTP header.
@@ -143,4 +143,4 @@ An alternative to this is to use the existing [hashmod](https://prometheus.io/do

 Once a Prometheus instance has been drained and no longer has targets to scrape we will wish to scale down and remove the instance. However, we will need to ensure that the data that is currently in the WAL block but not uploaded to object storage is flushed before we can remove the instance. Failing to do so will mean that any data in the WAL is lost when the Prometheus node is terminated. During this flush period until it is confirmed that the WAL has been uploaded we should still have the Prometheus instance serve requests for the data in the WAL.

-See [prometheus/tsdb - Issue 346](https://github.com/prometheus/tsdb/issues/346) for more information.
+See [prometheus/tsdb - Issue 346](https://github.com/prometheus-junkyard/tsdb/issues/346) for more information.
@@ -23,6 +23,7 @@ import (
 	"gopkg.in/yaml.v2"

 	"github.com/efficientgo/core/testutil"
+	"github.com/thanos-io/objstore"
 	"github.com/thanos-io/objstore/client"
 	"github.com/thanos-io/objstore/providers/s3"
 	tracingclient "github.com/thanos-io/thanos/pkg/tracing/client"
@@ -176,7 +177,7 @@ func TestReadOnlyThanosSetup(t *testing.T) {
 	// │ │
 	// └───────────┘
 	bkt1Config, err := yaml.Marshal(client.BucketConfig{
-		Type: client.S3,
+		Type: objstore.S3,
 		Config: s3.Config{
 			Bucket: "bkt1",
 			AccessKey: e2edb.MinioAccessKey,
@@ -198,7 +199,7 @@ func TestReadOnlyThanosSetup(t *testing.T) {
 	)

 	bkt2Config, err := yaml.Marshal(client.BucketConfig{
-		Type: client.S3,
+		Type: objstore.S3,
 		Config: s3.Config{
 			Bucket: "bkt2",
 			AccessKey: e2edb.MinioAccessKey,
go.mod
@@ -4,7 +4,7 @@ go 1.24.0

 require (
 	capnproto.org/go/capnp/v3 v3.1.0-alpha.1
-	cloud.google.com/go/trace v1.11.4
+	cloud.google.com/go/trace v1.11.6
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.27.0
 	github.com/KimMachineGun/automemlimit v0.7.3
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
@@ -49,7 +49,7 @@ require (
 	github.com/minio/sha256-simd v1.0.1
 	github.com/mitchellh/go-ps v1.0.0
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
-	github.com/oklog/run v1.1.0
+	github.com/oklog/run v1.2.0
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/onsi/gomega v1.36.2
@@ -60,16 +60,16 @@ require (
 	github.com/prometheus/alertmanager v0.28.1
 	github.com/prometheus/client_golang v1.22.0
 	github.com/prometheus/client_model v0.6.2
-	github.com/prometheus/common v0.63.0
+	github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3
 	github.com/prometheus/exporter-toolkit v0.14.0
 	// Prometheus maps version 3.x.y to tags v0.30x.y.
-	github.com/prometheus/prometheus v0.303.1
+	github.com/prometheus/prometheus v0.304.3-0.20250708181613-d8c921804e87
 	github.com/redis/rueidis v1.0.61
 	github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771
 	github.com/sony/gobreaker v1.0.0
 	github.com/stretchr/testify v1.10.0
-	github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97
+	github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3
-	github.com/thanos-io/promql-engine v0.0.0-20250522103302-dd83bd8fdb50
+	github.com/thanos-io/promql-engine v0.0.0-20250711160436-eb186b2cf537
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/vimeo/galaxycache v1.3.1
 	github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5
@@ -95,21 +95,21 @@ require (
 	golang.org/x/text v0.26.0
 	golang.org/x/time v0.12.0
 	google.golang.org/grpc v1.73.0
-	google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429
+	google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20
 	google.golang.org/protobuf v1.36.6
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 )

 require (
-	cloud.google.com/go v0.118.0 // indirect
+	cloud.google.com/go v0.120.0 // indirect
-	cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd // indirect
+	cloud.google.com/go/auth v0.16.2 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
 	cloud.google.com/go/compute/metadata v0.7.0 // indirect
-	cloud.google.com/go/iam v1.3.1 // indirect
+	cloud.google.com/go/iam v1.5.2 // indirect
-	cloud.google.com/go/storage v1.43.0 // indirect
+	cloud.google.com/go/storage v1.50.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
@@ -118,7 +118,7 @@ require (
 require (
 	github.com/alecthomas/kingpin/v2 v2.4.0
 	github.com/oklog/ulid/v2 v2.1.1
-	github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5
+	github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
 	github.com/tjhop/slog-gokit v0.1.4
 	go.opentelemetry.io/collector/pdata v1.34.0
 	go.opentelemetry.io/collector/semconv v0.128.0
@@ -145,7 +145,6 @@ require (
 	github.com/onsi/ginkgo v1.16.5 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/sercand/kuberesolver/v4 v4.0.0 // indirect
-	github.com/zhangyunhao116/umap v0.0.0-20250307031311-0b61e69e958b // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
 	go.opentelemetry.io/contrib/propagators/ot v1.36.0 // indirect
 	go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
@@ -159,11 +158,14 @@ require (
 )

 require (
+	cel.dev/expr v0.23.1 // indirect
+	cloud.google.com/go/monitoring v1.24.2 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
+	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect
 	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.55.7 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.29.15 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.68 // indirect
@@ -190,8 +192,10 @@ require (
 	github.com/elastic/go-sysinfo v1.15.3 // indirect
 	github.com/elastic/go-windows v1.0.2 // indirect
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
+	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
 	github.com/fatih/color v1.18.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
@@ -213,13 +217,12 @@ require (
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
-	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
+	github.com/googleapis/gax-go/v2 v2.14.2 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
 	github.com/jaegertracing/jaeger-idl v0.6.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
@@ -231,8 +234,9 @@ require (
 	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/minio/crc64nvme v1.0.1 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/minio-go/v7 v7.0.80 // indirect
+	github.com/minio/minio-go/v7 v7.0.93 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -246,22 +250,27 @@ require (
 	github.com/opentracing-contrib/go-grpc v0.1.2 // indirect
 	github.com/opentracing-contrib/go-stdlib v1.1.0 // indirect
 	github.com/oracle/oci-go-sdk/v65 v65.93.1 // indirect
+	github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/procfs v0.16.1 // indirect
-	github.com/prometheus/sigv4 v0.1.2 // indirect
+	github.com/prometheus/sigv4 v0.2.0 // indirect
 	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/rs/xid v1.6.0 // indirect
 	github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/tencentyun/cos-go-sdk-v5 v0.7.66 // indirect
+	github.com/tinylib/msgp v1.3.0 // indirect
 	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
 	github.com/weaveworks/promrus v1.2.0 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
 	github.com/yuin/gopher-lua v1.1.1 // indirect
+	github.com/zeebo/errs v1.4.0 // indirect
 	go.elastic.co/apm/module/apmhttp v1.15.0 // indirect
 	go.elastic.co/fastjson v1.5.1 // indirect
 	go.mongodb.org/mongo-driver v1.17.4 // indirect
@@ -276,6 +285,7 @@ require (
 	go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
 	go.opentelemetry.io/collector/processor v1.34.0 // indirect
 	go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
+	go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
 	go.opentelemetry.io/contrib/propagators/aws v1.36.0 // indirect
@@ -283,6 +293,7 @@ require (
 	go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 // indirect
 	go.opentelemetry.io/otel/log v0.12.2 // indirect
 	go.opentelemetry.io/otel/metric v1.36.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
@@ -292,11 +303,11 @@ require (
 	golang.org/x/sys v0.33.0 // indirect
 	golang.org/x/tools v0.34.0 // indirect
 	gonum.org/v1/gonum v0.16.0 // indirect
-	google.golang.org/api v0.228.0 // indirect
+	google.golang.org/api v0.238.0 // indirect
-	google.golang.org/genproto v0.0.0-20250122153221-138b5a5a4fd4 // indirect
+	google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
 	howett.net/plist v1.0.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
-	zenhack.net/go/util v0.0.0-20230607025951-8b02fee814ae // indirect
+	zenhack.net/go/util v0.0.0-20230414204917-531d38494cf5 // indirect
 )

 replace (
@@ -307,17 +318,14 @@ replace (
 	// Required by Cortex https://github.com/cortexproject/cortex/pull/3051.
 	github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab

-	// v3.3.1 with https://github.com/prometheus/prometheus/pull/16252.
-	github.com/prometheus/prometheus => github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88

 	// Pin kuberesolver/v5 to support new grpc version. Need to upgrade kuberesolver version on weaveworks/common.
 	github.com/sercand/kuberesolver/v4 => github.com/sercand/kuberesolver/v5 v5.1.1

 	github.com/vimeo/galaxycache => github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e

-	// Pinning grpc due https://github.com/grpc/grpc-go/issues/7314
-	google.golang.org/grpc => google.golang.org/grpc v1.63.2

 	// Overriding to use latest commit.
 	gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497

+	// The domain `zenhack.net` expired.
+	zenhack.net/go/util => github.com/zenhack/go-util v0.0.0-20231005031245-66f5419c2aea
 )
@@ -334,7 +334,12 @@ func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool {
 	}

 	// This resolves the start() and end() used with the @ modifier.
-	expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd()))
+	expr, err = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd()), time.Duration(r.GetStep())*time.Millisecond)
+	if err != nil {
+		// We are being pessimistic in such cases.
+		level.Warn(s.logger).Log("msg", "failed to preprocess expr", "query", query, "err", err)
+		return false
+	}

 	end := r.GetEnd()
 	atModCachable := true
@@ -12,6 +12,7 @@ import (
 	"github.com/efficientgo/core/testutil"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/annotations"
@@ -31,7 +32,7 @@ import (
 func TestGRPCQueryAPIWithQueryPlan(t *testing.T) {
 	logger := log.NewNopLogger()
 	reg := prometheus.NewRegistry()
-	proxy := store.NewProxyStore(logger, reg, func() []store.Client { return nil }, component.Store, nil, 1*time.Minute, store.LazyRetrieval)
+	proxy := store.NewProxyStore(logger, reg, func() []store.Client { return nil }, component.Store, labels.EmptyLabels(), 1*time.Minute, store.LazyRetrieval)
 	queryableCreator := query.NewQueryableCreator(logger, reg, proxy, 1, 1*time.Minute, dedup.AlgorithmPenalty)
 	remoteEndpointsCreator := query.NewRemoteEndpointsCreator(logger, func() []query.Client { return nil }, nil, 1*time.Minute, true, true)
 	lookbackDeltaFunc := func(i int64) time.Duration { return 5 * time.Minute }
@@ -39,7 +40,7 @@ func TestGRPCQueryAPIWithQueryPlan(t *testing.T) {

 	expr, err := extpromql.ParseExpr("metric")
 	testutil.Ok(t, err)
-	lplan := logicalplan.NewFromAST(expr, &equery.Options{}, logicalplan.PlanOptions{})
+	lplan, err := logicalplan.NewFromAST(expr, &equery.Options{}, logicalplan.PlanOptions{})
 	testutil.Ok(t, err)
 	// Create a mock query plan.
 	planBytes, err := logicalplan.Marshal(lplan.Root())
@@ -75,7 +76,7 @@ func TestGRPCQueryAPIWithQueryPlan(t *testing.T) {
 func TestGRPCQueryAPIErrorHandling(t *testing.T) {
 	logger := log.NewNopLogger()
 	reg := prometheus.NewRegistry()
-	proxy := store.NewProxyStore(logger, reg, func() []store.Client { return nil }, component.Store, nil, 1*time.Minute, store.LazyRetrieval)
+	proxy := store.NewProxyStore(logger, reg, func() []store.Client { return nil }, component.Store, labels.EmptyLabels(), 1*time.Minute, store.LazyRetrieval)
 	queryableCreator := query.NewQueryableCreator(logger, reg, proxy, 1, 1*time.Minute, dedup.AlgorithmPenalty)
 	remoteEndpointsCreator := query.NewRemoteEndpointsCreator(logger, func() []query.Client { return nil }, nil, 1*time.Minute, true, true)
 	lookbackDeltaFunc := func(i int64) time.Duration { return 5 * time.Minute }
@@ -43,6 +43,7 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/annotations"
 	"github.com/prometheus/prometheus/util/stats"
+	v1 "github.com/prometheus/prometheus/web/api/v1"
 	"github.com/thanos-io/promql-engine/engine"

 	"github.com/thanos-io/thanos/pkg/api"
@@ -110,6 +111,7 @@ type QueryAPI struct {

 	replicaLabels []string
 	endpointStatus func() []query.EndpointStatus
+	tsdbSelector *store.TSDBSelector

 	defaultRangeQueryStep time.Duration
 	defaultInstantQueryMaxSourceResolution time.Duration
@@ -159,6 +161,7 @@ func NewQueryAPI(
 	tenantCertField string,
 	enforceTenancy bool,
 	tenantLabel string,
+	tsdbSelector *store.TSDBSelector,
 ) *QueryAPI {
 	if statsAggregatorFactory == nil {
 		statsAggregatorFactory = &store.NoopSeriesStatsAggregatorFactory{}
@@ -194,6 +197,7 @@ func NewQueryAPI(
 		tenantCertField: tenantCertField,
 		enforceTenancy: enforceTenancy,
 		tenantLabel: tenantLabel,
+		tsdbSelector: tsdbSelector,

 		queryRangeHist: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
 			Name: "thanos_query_range_requested_timespan_duration_seconds",
@@ -675,7 +679,9 @@ func (qapi *QueryAPI) query(r *http.Request) (interface{}, []error, *api.ApiErro
 		}
 		return nil, nil, &api.ApiError{Typ: api.ErrorExec, Err: res.Err}, qry.Close
 	}
-	warnings = append(warnings, res.Warnings.AsErrors()...)
+	// this prevents a panic when annotations are concurrently accessed
+	safeWarnings := annotations.New().Merge(res.Warnings)
+	warnings = append(warnings, safeWarnings.AsErrors()...)

 	var analysis queryTelemetry
 	if qapi.parseQueryAnalyzeParam(r) {
@@ -984,7 +990,9 @@ func (qapi *QueryAPI) queryRange(r *http.Request) (interface{}, []error, *api.Ap
 		}
 		return nil, nil, &api.ApiError{Typ: api.ErrorExec, Err: res.Err}, qry.Close
 	}
-	warnings = append(warnings, res.Warnings.AsErrors()...)
+	// this prevents a panic when annotations are concurrently accessed
+	safeWarnings := annotations.New().Merge(res.Warnings)
+	warnings = append(warnings, safeWarnings.AsErrors()...)

 	var analysis queryTelemetry
 	if qapi.parseQueryAnalyzeParam(r) {
@@ -1295,7 +1303,20 @@ func (qapi *QueryAPI) stores(_ *http.Request) (interface{}, []error, *api.ApiErr
 		if status.ComponentType == nil {
 			continue
 		}
-		statuses[status.ComponentType.String()] = append(statuses[status.ComponentType.String()], status)
+
+		// Apply TSDBSelector filtering to LabelSets if selector is configured
+		filteredStatus := status
+		if qapi.tsdbSelector != nil && len(status.LabelSets) > 0 {
+			matches, filteredLabelSets := qapi.tsdbSelector.MatchLabelSets(status.LabelSets...)
+			if !matches {
+				continue
+			}
+			if filteredLabelSets != nil {
+				filteredStatus.LabelSets = filteredLabelSets
+			}
+		}
+
+		statuses[status.ComponentType.String()] = append(statuses[status.ComponentType.String()], filteredStatus)
 	}
 	return statuses, nil, nil, func() {}
 }
@@ -1446,11 +1467,11 @@ func NewExemplarsHandler(client exemplars.UnaryClient, enablePartialResponse boo
 		err error
 	)

-	start, err := parseTimeParam(r, "start", infMinTime)
+	start, err := parseTimeParam(r, "start", v1.MinTime)
 	if err != nil {
 		return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {}
 	}
-	end, err := parseTimeParam(r, "end", infMaxTime)
+	end, err := parseTimeParam(r, "end", v1.MaxTime)
 	if err != nil {
 		return nil, nil, &api.ApiError{Typ: api.ErrorBadData, Err: err}, func() {}
 	}
@@ -1473,17 +1494,12 @@ func NewExemplarsHandler(client exemplars.UnaryClient, enablePartialResponse boo
 	}
 }

-var (
-	infMinTime = time.Unix(math.MinInt64/1000+62135596801, 0)
-	infMaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999)
-)

 func parseMetadataTimeRange(r *http.Request, defaultMetadataTimeRange time.Duration) (time.Time, time.Time, error) {
 	// If start and end time not specified as query parameter, we get the range from the beginning of time by default.
 	var defaultStartTime, defaultEndTime time.Time
 	if defaultMetadataTimeRange == 0 {
-		defaultStartTime = infMinTime
+		defaultStartTime = v1.MinTime
-		defaultEndTime = infMaxTime
+		defaultEndTime = v1.MaxTime
 	} else {
 		now := time.Now()
 		defaultStartTime = now.Add(-defaultMetadataTimeRange)
@ -163,38 +163,13 @@ func testEndpoint(t *testing.T, test endpointTestCase, name string, responseComp
|
||||||
|
|
||||||
func TestQueryEndpoints(t *testing.T) {
|
func TestQueryEndpoints(t *testing.T) {
|
||||||
lbls := []labels.Labels{
|
lbls := []labels.Labels{
|
||||||
{
|
labels.FromStrings("__name__", "test_metric1", "foo", "bar"),
|
||||||
labels.Label{Name: "__name__", Value: "test_metric1"},
|
labels.FromStrings("__name__", "test_metric1", "foo", "boo"),
|
||||||
labels.Label{Name: "foo", Value: "bar"},
|
labels.FromStrings("__name__", "test_metric2", "foo", "boo"),
|
||||||
},
|
labels.FromStrings("__name__", "test_metric_replica1", "foo", "bar", "replica", "a"),
|
||||||
{
|
labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "a"),
|
||||||
labels.Label{Name: "__name__", Value: "test_metric1"},
|
labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "b"),
|
||||||
labels.Label{Name: "foo", Value: "boo"},
|
labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica1", "a"),
|
||||||
},
|
|
||||||
{
|
|
||||||
labels.Label{Name: "__name__", Value: "test_metric2"},
|
|
||||||
labels.Label{Name: "foo", Value: "boo"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
labels.Label{Name: "__name__", Value: "test_metric_replica1"},
|
|
||||||
labels.Label{Name: "foo", Value: "bar"},
|
|
||||||
labels.Label{Name: "replica", Value: "a"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
labels.Label{Name: "__name__", Value: "test_metric_replica1"},
|
|
||||||
labels.Label{Name: "foo", Value: "boo"},
|
|
||||||
labels.Label{Name: "replica", Value: "a"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
labels.Label{Name: "__name__", Value: "test_metric_replica1"},
|
|
||||||
labels.Label{Name: "foo", Value: "boo"},
|
|
||||||
labels.Label{Name: "replica", Value: "b"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
labels.Label{Name: "__name__", Value: "test_metric_replica1"},
|
|
||||||
labels.Label{Name: "foo", Value: "boo"},
|
|
||||||
labels.Label{Name: "replica1", Value: "a"},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
db, err := e2eutil.NewTSDB()
|
db, err := e2eutil.NewTSDB()
|
||||||
|
@ -286,76 +261,24 @@ func TestQueryEndpoints(t *testing.T) {
|
||||||
ResultType: parser.ValueTypeVector,
|
ResultType: parser.ValueTypeVector,
|
||||||
Result: promql.Vector{
|
Result: promql.Vector{
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "bar", "replica", "a"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "bar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "replica",
|
|
||||||
Value: "a",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "a"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "boo",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "replica",
|
|
||||||
Value: "a",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "b"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "boo",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "replica",
|
|
||||||
Value: "b",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica1", "a"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "boo",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "replica1",
|
|
||||||
Value: "a",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -373,50 +296,19 @@ func TestQueryEndpoints(t *testing.T) {
|
||||||
ResultType: parser.ValueTypeVector,
|
ResultType: parser.ValueTypeVector,
|
||||||
Result: promql.Vector{
|
Result: promql.Vector{
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "bar"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "bar",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "boo",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica1", "a"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "boo",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "replica1",
|
|
||||||
Value: "a",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -433,32 +325,14 @@ func TestQueryEndpoints(t *testing.T) {
|
||||||
ResultType: parser.ValueTypeVector,
|
ResultType: parser.ValueTypeVector,
|
||||||
Result: promql.Vector{
|
Result: promql.Vector{
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "bar"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
||||||
Value: "bar",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
T: 123000,
|
|
||||||
F: 2,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Metric: labels.Labels{
|
Metric: labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo"),
|
||||||
{
|
T: 123000,
|
||||||
Name: "__name__",
|
F: 2,
|
||||||
Value: "test_metric_replica1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo",
|
|
-Value: "boo",
-},
-},
-T: 123000,
-F: 2,
 },
 },
 },
@@ -504,7 +378,6 @@ func TestQueryEndpoints(t *testing.T) {
 }
 return res
 }(500, 1),
-Metric: nil,
 },
 },
 },
@@ -526,7 +399,6 @@ func TestQueryEndpoints(t *testing.T) {
 {F: 1, T: timestamp.FromTime(start.Add(1 * time.Second))},
 {F: 2, T: timestamp.FromTime(start.Add(2 * time.Second))},
 },
-Metric: nil,
 },
 },
 },
@@ -779,7 +651,6 @@ func TestQueryAnalyzeEndpoints(t *testing.T) {
 }
 return res
 }(500, 1),
-Metric: nil,
 },
 },
 QueryAnalysis: queryTelemetry{},
@@ -796,7 +667,7 @@ func TestQueryAnalyzeEndpoints(t *testing.T) {
 func newProxyStoreWithTSDBStore(db store.TSDBReader) *store.ProxyStore {
 c := &storetestutil.TestClient{
 Name: "1",
-StoreClient: storepb.ServerAsClient(store.NewTSDBStore(nil, db, component.Query, nil)),
+StoreClient: storepb.ServerAsClient(store.NewTSDBStore(nil, db, component.Query, labels.EmptyLabels())),
 MinTime: math.MinInt64, MaxTime: math.MaxInt64,
 }

@@ -805,7 +676,7 @@ func newProxyStoreWithTSDBStore(db store.TSDBReader) *store.ProxyStore {
 nil,
 func() []store.Client { return []store.Client{c} },
 component.Query,
-nil,
+labels.EmptyLabels(),
 0,
 store.EagerRetrieval,
 )
@@ -813,41 +684,16 @@ func newProxyStoreWithTSDBStore(db store.TSDBReader) *store.ProxyStore {

 func TestMetadataEndpoints(t *testing.T) {
 var old = []labels.Labels{
-{
-labels.Label{Name: "__name__", Value: "test_metric1"},
-labels.Label{Name: "foo", Value: "bar"},
-},
-{
-labels.Label{Name: "__name__", Value: "test_metric1"},
-labels.Label{Name: "foo", Value: "boo"},
-},
-{
-labels.Label{Name: "__name__", Value: "test_metric2"},
-labels.Label{Name: "foo", Value: "boo"},
-},
+labels.FromStrings("__name__", "test_metric1", "foo", "bar"),
+labels.FromStrings("__name__", "test_metric1", "foo", "boo"),
+labels.FromStrings("__name__", "test_metric2", "foo", "boo"),
 }

 var recent = []labels.Labels{
-{
-labels.Label{Name: "__name__", Value: "test_metric_replica1"},
-labels.Label{Name: "foo", Value: "bar"},
-labels.Label{Name: "replica", Value: "a"},
-},
-{
-labels.Label{Name: "__name__", Value: "test_metric_replica1"},
-labels.Label{Name: "foo", Value: "boo"},
-labels.Label{Name: "replica", Value: "a"},
-},
-{
-labels.Label{Name: "__name__", Value: "test_metric_replica1"},
-labels.Label{Name: "foo", Value: "boo"},
-labels.Label{Name: "replica", Value: "b"},
-},
-{
-labels.Label{Name: "__name__", Value: "test_metric_replica2"},
-labels.Label{Name: "foo", Value: "boo"},
-labels.Label{Name: "replica1", Value: "a"},
-},
+labels.FromStrings("__name__", "test_metric_replica1", "foo", "bar", "replica", "a"),
+labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "a"),
+labels.FromStrings("__name__", "test_metric_replica1", "foo", "boo", "replica", "b"),
+labels.FromStrings("__name__", "test_metric_replica2", "foo", "boo", "replica1", "a"),
 }

 dir := t.TempDir()
@@ -2003,7 +1849,7 @@ func TestRulesHandler(t *testing.T) {
 EvaluationTime: all[3].GetAlert().EvaluationDurationSeconds,
 Duration: all[3].GetAlert().DurationSeconds,
 KeepFiringFor: all[3].GetAlert().KeepFiringForSeconds,
-Annotations: nil,
+Annotations: labels.EmptyLabels(),
 Alerts: []*testpromcompatibility.Alert{},
 Type: "alerting",
 },
@@ -2018,7 +1864,7 @@ func TestRulesHandler(t *testing.T) {
 EvaluationTime: all[4].GetAlert().EvaluationDurationSeconds,
 Duration: all[4].GetAlert().DurationSeconds,
 KeepFiringFor: all[4].GetAlert().KeepFiringForSeconds,
-Annotations: nil,
+Annotations: labels.EmptyLabels(),
 Alerts: []*testpromcompatibility.Alert{},
 Type: "alerting",
 },
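The hunks above, and several further down, follow one mechanical migration: the Prometheus labels package is now treated as opaque, so labels.Label struct literals and nil placeholders give way to constructors and accessors. A minimal, self-contained sketch of the replacement API, using only calls from github.com/prometheus/prometheus/model/labels:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        // Build a label set with the constructor instead of a struct literal.
        lset := labels.FromStrings("__name__", "test_metric1", "foo", "bar")

        // Read values via accessors rather than indexing a slice.
        fmt.Println(lset.Get("foo")) // "bar"

        // Iterate with Range instead of ranging over a slice.
        lset.Range(func(l labels.Label) {
            fmt.Println(l.Name, "=", l.Value)
        })

        // Use labels.EmptyLabels() where nil used to stand in for "no labels".
        fmt.Println(labels.EmptyLabels().IsEmpty()) // true
    }

The same pattern explains the FromStrings, Range, and Get rewrites in the compaction, store, and endpoint-set diffs later in this compare.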
@@ -145,7 +145,7 @@ func TestUpload(t *testing.T) {
 testutil.Equals(t, 3, len(bkt.Objects()))
 testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]))
 testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]))
-testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))
+testutil.Equals(t, 621, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))

 // File stats are gathered.
 testutil.Equals(t, fmt.Sprintf(`{
@@ -154,6 +154,7 @@ func TestUpload(t *testing.T) {
 "maxTime": 1000,
 "stats": {
 "numSamples": 500,
+"numFloatSamples": 500,
 "numSeries": 5,
 "numChunks": 5
 },
@@ -198,7 +199,7 @@ func TestUpload(t *testing.T) {
 testutil.Equals(t, 3, len(bkt.Objects()))
 testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]))
 testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]))
-testutil.Equals(t, 595, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))
+testutil.Equals(t, 621, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]))
 }
 {
 // Upload with no external labels should be blocked.
@@ -230,7 +231,7 @@ func TestUpload(t *testing.T) {
 testutil.Equals(t, 6, len(bkt.Objects()))
 testutil.Equals(t, 3727, len(bkt.Objects()[path.Join(b2.String(), ChunksDirname, "000001")]))
 testutil.Equals(t, 401, len(bkt.Objects()[path.Join(b2.String(), IndexFilename)]))
-testutil.Equals(t, 574, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)]))
+testutil.Equals(t, 600, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)]))
 }
 }

@@ -588,8 +589,8 @@ type errBucket struct {
 failSuffix string
 }

-func (eb errBucket) Upload(ctx context.Context, name string, r io.Reader) error {
-err := eb.Bucket.Upload(ctx, name, r)
+func (eb errBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
+err := eb.Bucket.Upload(ctx, name, r, opts...)
 if err != nil {
 return err
 }
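The errBucket change above tracks the updated objstore Upload signature, which now takes variadic objstore.ObjectUploadOption values; any decorator has to forward them or callers silently lose the options. A sketch of such a wrapper (loggingBucket itself is hypothetical; the interface shape is taken from the diff):

    package bucketwrap

    import (
        "context"
        "io"
        "log"

        "github.com/thanos-io/objstore"
    )

    // loggingBucket decorates a bucket in the same style as errBucket above.
    type loggingBucket struct {
        objstore.Bucket
    }

    // Upload forwards the variadic options to the wrapped bucket unchanged.
    func (b loggingBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
        log.Printf("uploading %q", name)
        return b.Bucket.Upload(ctx, name, r, opts...)
    }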
@@ -42,7 +42,8 @@ const FetcherConcurrency = 32
 // to allow depending projects (eg. Cortex) to implement their own custom metadata fetcher while tracking
 // compatible metrics.
 type BaseFetcherMetrics struct {
 Syncs prometheus.Counter
+CacheBusts prometheus.Counter
 }

 // FetcherMetrics holds metrics tracked by the metadata fetcher. This struct and its fields are exported
@@ -92,6 +93,9 @@ const (
 // MarkedForNoDownsampleMeta is label for blocks which are loaded but also marked for no downsample. This label is also counted in `loaded` label metric.
 MarkedForNoDownsampleMeta = "marked-for-no-downsample"

+// ParquetMigratedMeta is label for blocks which are marked as migrated to parquet format.
+ParquetMigratedMeta = "parquet-migrated"
+
 // Modified label values.
 replicaRemovedMeta = "replica-label-removed"
 )
@@ -104,6 +108,11 @@ func NewBaseFetcherMetrics(reg prometheus.Registerer) *BaseFetcherMetrics {
 Name: "base_syncs_total",
 Help: "Total blocks metadata synchronization attempts by base Fetcher",
 })
+m.CacheBusts = promauto.With(reg).NewCounter(prometheus.CounterOpts{
+Subsystem: FetcherSubSys,
+Name: "base_cache_busts_total",
+Help: "Total blocks metadata cache busts by base Fetcher",
+})

 return &m
 }
@@ -162,6 +171,7 @@ func DefaultSyncedStateLabelValues() [][]string {
 {duplicateMeta},
 {MarkedForDeletionMeta},
 {MarkedForNoCompactionMeta},
+{ParquetMigratedMeta},
 }
 }
@@ -175,7 +185,7 @@ func DefaultModifiedLabelValues() [][]string {
 type Lister interface {
 // GetActiveAndPartialBlockIDs GetActiveBlocksIDs returning it via channel (streaming) and response.
 // Active blocks are blocks which contain meta.json, while partial blocks are blocks without meta.json
-GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error)
+GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error)
 }

 // RecursiveLister lists block IDs by recursively iterating through a bucket.
@@ -191,9 +201,17 @@ func NewRecursiveLister(logger log.Logger, bkt objstore.InstrumentedBucketReader
 }
 }

-func (f *RecursiveLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) {
+type ActiveBlockFetchData struct {
+lastModified time.Time
+ulid.ULID
+}
+
+func (f *RecursiveLister) GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error) {
 partialBlocks = make(map[ulid.ULID]bool)
-err = f.bkt.Iter(ctx, "", func(name string) error {
+err = f.bkt.IterWithAttributes(ctx, "", func(attrs objstore.IterObjectAttributes) error {
+name := attrs.Name
+
 parts := strings.Split(name, "/")
 dir, file := parts[0], parts[len(parts)-1]
 id, ok := IsBlockDir(dir)
@@ -206,15 +224,20 @@ func (f *RecursiveLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch ch
 if !IsBlockMetaFile(file) {
 return nil
 }
-partialBlocks[id] = false
+lastModified, _ := attrs.LastModified()
+delete(partialBlocks, id)
+
 select {
 case <-ctx.Done():
 return ctx.Err()
-case ch <- id:
+case activeBlocks <- ActiveBlockFetchData{
+ULID: id,
+lastModified: lastModified,
+}:
 }
 return nil
-}, objstore.WithRecursiveIter())
+}, objstore.WithUpdatedAt(), objstore.WithRecursiveIter())
 return partialBlocks, err
 }
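The RecursiveLister now iterates with object attributes so each meta.json's last-modified time rides along with the block ID. A condensed sketch of that call shape; the option and accessor names match the diff, while the surrounding function is illustrative:

    package lister

    import (
        "context"
        "fmt"

        "github.com/thanos-io/objstore"
    )

    // listWithTimes shows the IterWithAttributes call shape used above. The
    // WithUpdatedAt option asks the provider to populate last-modified times.
    func listWithTimes(ctx context.Context, bkt objstore.InstrumentedBucketReader) error {
        return bkt.IterWithAttributes(ctx, "", func(attrs objstore.IterObjectAttributes) error {
            lastModified, ok := attrs.LastModified() // ok is false when the provider omits it
            fmt.Println(attrs.Name, lastModified, ok)
            return nil
        }, objstore.WithUpdatedAt(), objstore.WithRecursiveIter())
    }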
@@ -232,10 +255,11 @@ func NewConcurrentLister(logger log.Logger, bkt objstore.InstrumentedBucketReade
 }
 }

-func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) {
+func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error) {
 const concurrency = 64

 partialBlocks = make(map[ulid.ULID]bool)

 var (
 metaChan = make(chan ulid.ULID, concurrency)
 eg, gCtx = errgroup.WithContext(ctx)
@@ -258,10 +282,14 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c
 mu.Unlock()
 continue
 }

 select {
 case <-gCtx.Done():
 return gCtx.Err()
-case ch <- uid:
+case activeBlocks <- ActiveBlockFetchData{
+ULID: uid,
+lastModified: time.Time{}, // Not used, cache busting is only implemented by the recursive lister because otherwise we would have to call Attributes() (one extra call).
+}:
 }
 }
 return nil
|
||||||
blockIDsLister Lister
|
blockIDsLister Lister
|
||||||
|
|
||||||
// Optional local directory to cache meta.json files.
|
// Optional local directory to cache meta.json files.
|
||||||
cacheDir string
|
cacheDir string
|
||||||
syncs prometheus.Counter
|
syncs prometheus.Counter
|
||||||
g singleflight.Group
|
cacheBusts prometheus.Counter
|
||||||
|
g singleflight.Group
|
||||||
|
|
||||||
mtx sync.Mutex
|
mtx sync.Mutex
|
||||||
cached map[ulid.ULID]*metadata.Meta
|
|
||||||
|
cached *sync.Map
|
||||||
|
|
||||||
|
modifiedTimestamps map[ulid.ULID]time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBaseFetcher constructs BaseFetcher.
|
// NewBaseFetcher constructs BaseFetcher.
|
||||||
|
@ -347,8 +379,9 @@ func NewBaseFetcherWithMetrics(logger log.Logger, concurrency int, bkt objstore.
|
||||||
bkt: bkt,
|
bkt: bkt,
|
||||||
blockIDsLister: blockIDsLister,
|
blockIDsLister: blockIDsLister,
|
||||||
cacheDir: cacheDir,
|
cacheDir: cacheDir,
|
||||||
cached: map[ulid.ULID]*metadata.Meta{},
|
cached: &sync.Map{},
|
||||||
syncs: metrics.Syncs,
|
syncs: metrics.Syncs,
|
||||||
|
cacheBusts: metrics.CacheBusts,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,6 +424,22 @@ var (
|
||||||
ErrorSyncMetaCorrupted = errors.New("meta.json corrupted")
|
ErrorSyncMetaCorrupted = errors.New("meta.json corrupted")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func (f *BaseFetcher) metaUpdated(id ulid.ULID, modified time.Time) bool {
|
||||||
|
if f.modifiedTimestamps[id].IsZero() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return !f.modifiedTimestamps[id].Equal(modified)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *BaseFetcher) bustCacheForID(id ulid.ULID) {
|
||||||
|
f.cacheBusts.Inc()
|
||||||
|
|
||||||
|
f.cached.Delete(id)
|
||||||
|
if err := os.RemoveAll(filepath.Join(f.cacheDir, id.String())); err != nil {
|
||||||
|
level.Warn(f.logger).Log("msg", "failed to remove cached meta.json dir", "dir", filepath.Join(f.cacheDir, id.String()), "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// loadMeta returns metadata from object storage or error.
|
// loadMeta returns metadata from object storage or error.
|
||||||
// It returns `ErrorSyncMetaNotFound` and `ErrorSyncMetaCorrupted` sentinel errors in those cases.
|
// It returns `ErrorSyncMetaNotFound` and `ErrorSyncMetaCorrupted` sentinel errors in those cases.
|
||||||
func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Meta, error) {
|
func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Meta, error) {
|
||||||
|
@ -399,8 +448,8 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met
|
||||||
cachedBlockDir = filepath.Join(f.cacheDir, id.String())
|
cachedBlockDir = filepath.Join(f.cacheDir, id.String())
|
||||||
)
|
)
|
||||||
|
|
||||||
if m, seen := f.cached[id]; seen {
|
if m, seen := f.cached.Load(id); seen {
|
||||||
return m, nil
|
return m.(*metadata.Meta), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Best effort load from local dir.
|
// Best effort load from local dir.
|
||||||
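The pair of helpers added above drives the new cache-busting behavior: a cached meta.json is discarded only when a previously recorded last-modified time exists and no longer matches. A zero timestamp therefore means "no opinion", which is how the ConcurrentLister (always sending time.Time{}) opts out of busting. A condensed, hypothetical restatement of that check:

    package main

    import (
        "fmt"
        "time"
    )

    // metaUpdated mirrors the logic in the diff: bust only when we recorded a
    // modification time earlier and the current one differs.
    func metaUpdated(prev, current time.Time) bool {
        if prev.IsZero() {
            return false // never seen, or the lister does not report times
        }
        return !prev.Equal(current)
    }

    func main() {
        t0 := time.Unix(100, 0)
        t1 := time.Unix(200, 0)
        fmt.Println(metaUpdated(time.Time{}, t1)) // false: no recorded timestamp
        fmt.Println(metaUpdated(t0, t0))          // false: unchanged
        fmt.Println(metaUpdated(t0, t1))          // true: meta.json was rewritten
    }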
|
@ -457,8 +506,9 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met
|
||||||
}
|
}
|
||||||
|
|
||||||
type response struct {
|
type response struct {
|
||||||
metas map[ulid.ULID]*metadata.Meta
|
metas map[ulid.ULID]*metadata.Meta
|
||||||
partial map[ulid.ULID]error
|
partial map[ulid.ULID]error
|
||||||
|
modifiedTimestamps map[ulid.ULID]time.Time
|
||||||
// If metaErr > 0 it means incomplete view, so some metas, failed to be loaded.
|
// If metaErr > 0 it means incomplete view, so some metas, failed to be loaded.
|
||||||
metaErrs errutil.MultiError
|
metaErrs errutil.MultiError
|
||||||
|
|
||||||
|
@ -471,21 +521,29 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
resp = response{
|
resp = response{
|
||||||
metas: make(map[ulid.ULID]*metadata.Meta),
|
metas: make(map[ulid.ULID]*metadata.Meta),
|
||||||
partial: make(map[ulid.ULID]error),
|
partial: make(map[ulid.ULID]error),
|
||||||
|
modifiedTimestamps: make(map[ulid.ULID]time.Time),
|
||||||
}
|
}
|
||||||
eg errgroup.Group
|
eg errgroup.Group
|
||||||
ch = make(chan ulid.ULID, f.concurrency)
|
activeBlocksCh = make(chan ActiveBlockFetchData, f.concurrency)
|
||||||
mtx sync.Mutex
|
mtx sync.Mutex
|
||||||
)
|
)
|
||||||
level.Debug(f.logger).Log("msg", "fetching meta data", "concurrency", f.concurrency)
|
level.Debug(f.logger).Log("msg", "fetching meta data", "concurrency", f.concurrency)
|
||||||
for i := 0; i < f.concurrency; i++ {
|
for i := 0; i < f.concurrency; i++ {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
for id := range ch {
|
for activeBlockFetchMD := range activeBlocksCh {
|
||||||
|
id := activeBlockFetchMD.ULID
|
||||||
|
|
||||||
|
if f.metaUpdated(id, activeBlockFetchMD.lastModified) {
|
||||||
|
f.bustCacheForID(id)
|
||||||
|
}
|
||||||
|
|
||||||
meta, err := f.loadMeta(ctx, id)
|
meta, err := f.loadMeta(ctx, id)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
mtx.Lock()
|
mtx.Lock()
|
||||||
resp.metas[id] = meta
|
resp.metas[id] = meta
|
||||||
|
resp.modifiedTimestamps[id] = activeBlockFetchMD.lastModified
|
||||||
mtx.Unlock()
|
mtx.Unlock()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@@ -518,8 +576,8 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) {
 var err error
 // Workers scheduled, distribute blocks.
 eg.Go(func() error {
-defer close(ch)
-partialBlocks, err = f.blockIDsLister.GetActiveAndPartialBlockIDs(ctx, ch)
+defer close(activeBlocksCh)
+partialBlocks, err = f.blockIDsLister.GetActiveAndPartialBlockIDs(ctx, activeBlocksCh)
 return err
 })

@@ -541,13 +599,20 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) {
 }

 // Only for complete view of blocks update the cache.
-cached := make(map[ulid.ULID]*metadata.Meta, len(resp.metas))
+cached := &sync.Map{}
 for id, m := range resp.metas {
-cached[id] = m
+cached.Store(id, m)
+}
+
+modifiedTimestamps := make(map[ulid.ULID]time.Time, len(resp.modifiedTimestamps))
+for id, ts := range resp.modifiedTimestamps {
+modifiedTimestamps[id] = ts
 }

 f.mtx.Lock()
 f.cached = cached
+f.modifiedTimestamps = modifiedTimestamps
 f.mtx.Unlock()

 // Best effort cleanup of disk-cached metas.
@ -632,8 +697,12 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter
|
||||||
func (f *BaseFetcher) countCached() int {
|
func (f *BaseFetcher) countCached() int {
|
||||||
f.mtx.Lock()
|
f.mtx.Lock()
|
||||||
defer f.mtx.Unlock()
|
defer f.mtx.Unlock()
|
||||||
|
var i int
|
||||||
return len(f.cached)
|
f.cached.Range(func(_, _ interface{}) bool {
|
||||||
|
i++
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return i
|
||||||
}
|
}
|
||||||
|
|
||||||
type MetaFetcher struct {
|
type MetaFetcher struct {
|
||||||
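The meta cache switches from a plain map guarded by f.mtx to sync.Map so that bustCacheForID can delete entries concurrently with readers. A minimal sketch of the operations the new code relies on (sync.Map stores interface{} values, hence the type assertion, and it has no Len, hence the Range-based counting above):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var cache sync.Map

        cache.Store("block-1", "meta")
        if v, ok := cache.Load("block-1"); ok {
            fmt.Println(v.(string)) // values come back as interface{} and must be asserted
        }
        cache.Delete("block-1")

        // Counting entries requires a full Range, exactly what countCached does.
        n := 0
        cache.Range(func(_, _ interface{}) bool { n++; return true })
        fmt.Println(n)
    }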
@@ -1086,3 +1155,46 @@ func ParseRelabelConfig(contentYaml []byte, supportedActions map[relabel.Action]

 return relabelConfig, nil
 }
+
+var _ MetadataFilter = &ParquetMigratedMetaFilter{}
+
+// ParquetMigratedMetaFilter is a metadata filter that filters out blocks that have been
+// migrated to parquet format. The filter checks for the presence of the parquet_migrated
+// extension key with a value of true.
+// Not go-routine safe.
+type ParquetMigratedMetaFilter struct {
+logger log.Logger
+}
+
+// NewParquetMigratedMetaFilter creates a new ParquetMigratedMetaFilter.
+func NewParquetMigratedMetaFilter(logger log.Logger) *ParquetMigratedMetaFilter {
+return &ParquetMigratedMetaFilter{
+logger: logger,
+}
+}
+
+// Filter filters out blocks that have been marked as migrated to parquet format.
+func (f *ParquetMigratedMetaFilter) Filter(_ context.Context, metas map[ulid.ULID]*metadata.Meta, synced GaugeVec, modified GaugeVec) error {
+for id, meta := range metas {
+if meta.Thanos.Extensions == nil {
+continue
+}
+
+extensionsMap, ok := meta.Thanos.Extensions.(map[string]interface{})
+if !ok {
+continue
+}
+
+parquetMigrated, exists := extensionsMap[metadata.ParquetMigratedExtensionKey]
+if !exists {
+continue
+}
+
+if migratedBool, ok := parquetMigrated.(bool); ok && migratedBool {
+level.Debug(f.logger).Log("msg", "filtering out parquet migrated block", "block", id)
+synced.WithLabelValues(ParquetMigratedMeta).Inc()
+delete(metas, id)
+}
+}
+return nil
+}
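A sketch of how a consumer might wire the new filter into a fetcher. The constructor shapes follow the test code below; the package import path and the exact NewMetaFetcher signature are assumptions:

    package fetcherwire

    import (
        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/thanos-io/thanos/pkg/block" // assumed import path
    )

    // NewFetcherIgnoringParquet is a hypothetical constructor showing where the
    // filter plugs in; bf is assumed to come from block.NewBaseFetcher.
    func NewFetcherIgnoringParquet(logger log.Logger, reg prometheus.Registerer, bf *block.BaseFetcher) *block.MetaFetcher {
        return bf.NewMetaFetcher(reg, []block.MetadataFilter{
            block.NewParquetMigratedMetaFilter(logger),
        }, nil)
    }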
@@ -22,6 +22,7 @@ import (

 "github.com/pkg/errors"
 "github.com/prometheus/client_golang/prometheus"
+"github.com/prometheus/client_golang/prometheus/promauto"
 promtest "github.com/prometheus/client_golang/prometheus/testutil"
 "github.com/prometheus/prometheus/tsdb"
 "github.com/thanos-io/objstore"
@@ -72,30 +73,38 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 dir := t.TempDir()

 var ulidToDelete ulid.ULID
-r := prometheus.NewRegistry()
 noopLogger := log.NewNopLogger()
 insBkt := objstore.WithNoopInstr(bkt)
-baseBlockIDsFetcher := NewConcurrentLister(noopLogger, insBkt)
-baseFetcher, err := NewBaseFetcher(noopLogger, 20, insBkt, baseBlockIDsFetcher, dir, r)
+r := prometheus.NewRegistry()
+
+recursiveLister := NewRecursiveLister(noopLogger, insBkt)
+recursiveBaseFetcher, err := NewBaseFetcher(noopLogger, 20, insBkt, recursiveLister, dir, r)
 testutil.Ok(t, err)

-fetcher := baseFetcher.NewMetaFetcher(r, []MetadataFilter{
+recursiveFetcher := recursiveBaseFetcher.NewMetaFetcher(r, []MetadataFilter{
 &ulidFilter{ulidToDelete: &ulidToDelete},
 }, nil)

-for i, tcase := range []struct {
+for _, tcase := range []struct {
 name string
-do func()
+do func(cleanCache func())
 filterULID ulid.ULID
 expectedMetas []ulid.ULID
 expectedCorruptedMeta []ulid.ULID
 expectedNoMeta []ulid.ULID
 expectedFiltered int
 expectedMetaErr error
+expectedCacheBusts int
+expectedSyncs int
+
+// If this is set then use it.
+fetcher *MetaFetcher
+baseFetcher *BaseFetcher
 }{
 {
 name: "empty bucket",
-do: func() {},
+do: func(_ func()) {},

 expectedMetas: ULIDs(),
 expectedCorruptedMeta: ULIDs(),
@@ -103,7 +112,7 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 },
 {
 name: "3 metas in bucket",
-do: func() {
+do: func(_ func()) {
 var meta metadata.Meta
 meta.Version = 1
 meta.ULID = ULID(1)
@@ -126,28 +135,8 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 expectedNoMeta: ULIDs(),
 },
 {
-name: "nothing changed",
-do: func() {},
-
-expectedMetas: ULIDs(1, 2, 3),
-expectedCorruptedMeta: ULIDs(),
-expectedNoMeta: ULIDs(),
-},
-{
-name: "fresh cache",
-do: func() {
-baseFetcher.cached = map[ulid.ULID]*metadata.Meta{}
-},
-
-expectedMetas: ULIDs(1, 2, 3),
-expectedCorruptedMeta: ULIDs(),
-expectedNoMeta: ULIDs(),
-},
-{
-name: "fresh cache: meta 2 and 3 have corrupted data on disk ",
-do: func() {
-baseFetcher.cached = map[ulid.ULID]*metadata.Meta{}
-
+name: "meta 2 and 3 have corrupted data on disk ",
+do: func(cleanCache func()) {
 testutil.Ok(t, os.Remove(filepath.Join(dir, "meta-syncer", ULID(2).String(), MetaFilename)))

 f, err := os.OpenFile(filepath.Join(dir, "meta-syncer", ULID(3).String(), MetaFilename), os.O_WRONLY, os.ModePerm)
@@ -164,7 +153,7 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 },
 {
 name: "block without meta",
-do: func() {
+do: func(_ func()) {
 testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(4).String(), "some-file"), bytes.NewBuffer([]byte("something"))))
 },

@@ -174,7 +163,7 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 },
 {
 name: "corrupted meta.json",
-do: func() {
+do: func(_ func()) {
 testutil.Ok(t, bkt.Upload(ctx, path.Join(ULID(5).String(), MetaFilename), bytes.NewBuffer([]byte("{ not a json"))))
 },

@@ -182,46 +171,71 @@ func TestMetaFetcher_Fetch(t *testing.T) {
 expectedCorruptedMeta: ULIDs(5),
 expectedNoMeta: ULIDs(4),
 },
-{
-name: "some added some deleted",
-do: func() {
-testutil.Ok(t, Delete(ctx, log.NewNopLogger(), bkt, ULID(2)))
-
+{
+name: "filter not existing ulid",
+do: func(_ func()) {},
+filterULID: ULID(10),
+
+expectedMetas: ULIDs(1, 2, 3),
+expectedCorruptedMeta: ULIDs(5),
+expectedNoMeta: ULIDs(4),
+},
+{
+name: "filter ulid 1",
+do: func(_ func()) {
 var meta metadata.Meta
 meta.Version = 1
-meta.ULID = ULID(6)
+meta.ULID = ULID(1)

 var buf bytes.Buffer
 testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta))
 testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf))
 },

-expectedMetas: ULIDs(1, 3, 6),
-expectedCorruptedMeta: ULIDs(5),
-expectedNoMeta: ULIDs(4),
-},
-{
-name: "filter not existing ulid",
-do: func() {},
-filterULID: ULID(10),
-
-expectedMetas: ULIDs(1, 3, 6),
-expectedCorruptedMeta: ULIDs(5),
-expectedNoMeta: ULIDs(4),
-},
-{
-name: "filter ulid 1",
-do: func() {},
 filterULID: ULID(1),

-expectedMetas: ULIDs(3, 6),
+expectedMetas: ULIDs(2, 3),
 expectedCorruptedMeta: ULIDs(5),
 expectedNoMeta: ULIDs(4),
 expectedFiltered: 1,
 },
+{
+name: "use recursive lister",
+do: func(cleanCache func()) {
+cleanCache()
+},
+fetcher: recursiveFetcher,
+baseFetcher: recursiveBaseFetcher,
+
+expectedMetas: ULIDs(1, 2, 3),
+expectedCorruptedMeta: ULIDs(5),
+expectedNoMeta: ULIDs(4),
+},
+{
+name: "update timestamp, expect a cache bust",
+do: func(_ func()) {
+var meta metadata.Meta
+meta.Version = 1
+meta.MaxTime = 123456
+meta.ULID = ULID(1)
+
+var buf bytes.Buffer
+testutil.Ok(t, json.NewEncoder(&buf).Encode(&meta))
+testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf))
+},
+fetcher: recursiveFetcher,
+baseFetcher: recursiveBaseFetcher,
+
+expectedMetas: ULIDs(1, 2, 3),
+expectedCorruptedMeta: ULIDs(5),
+expectedNoMeta: ULIDs(4),
+expectedFiltered: 0,
+expectedCacheBusts: 1,
+expectedSyncs: 2,
+},
 {
 name: "error: not supported meta version",
-do: func() {
+do: func(_ func()) {
 var meta metadata.Meta
 meta.Version = 20
 meta.ULID = ULID(7)
@ -231,14 +245,40 @@ func TestMetaFetcher_Fetch(t *testing.T) {
|
||||||
testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf))
|
testutil.Ok(t, bkt.Upload(ctx, path.Join(meta.ULID.String(), metadata.MetaFilename), &buf))
|
||||||
},
|
},
|
||||||
|
|
||||||
expectedMetas: ULIDs(1, 3, 6),
|
expectedMetas: ULIDs(1, 2, 3),
|
||||||
expectedCorruptedMeta: ULIDs(5),
|
expectedCorruptedMeta: ULIDs(5),
|
||||||
expectedNoMeta: ULIDs(4),
|
expectedNoMeta: ULIDs(4),
|
||||||
expectedMetaErr: errors.New("incomplete view: unexpected meta file: 00000000070000000000000000/meta.json version: 20"),
|
expectedMetaErr: errors.New("incomplete view: unexpected meta file: 00000000070000000000000000/meta.json version: 20"),
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
if ok := t.Run(tcase.name, func(t *testing.T) {
|
if ok := t.Run(tcase.name, func(t *testing.T) {
|
||||||
tcase.do()
|
r := prometheus.NewRegistry()
|
||||||
|
|
||||||
|
var fetcher *MetaFetcher
|
||||||
|
var baseFetcher *BaseFetcher
|
||||||
|
|
||||||
|
if tcase.baseFetcher != nil {
|
||||||
|
baseFetcher = tcase.baseFetcher
|
||||||
|
} else {
|
||||||
|
lister := NewConcurrentLister(noopLogger, insBkt)
|
||||||
|
bf, err := NewBaseFetcher(noopLogger, 20, insBkt, lister, dir, r)
|
||||||
|
testutil.Ok(t, err)
|
||||||
|
|
||||||
|
baseFetcher = bf
|
||||||
|
}
|
||||||
|
|
||||||
|
if tcase.fetcher != nil {
|
||||||
|
fetcher = tcase.fetcher
|
||||||
|
} else {
|
||||||
|
fetcher = baseFetcher.NewMetaFetcher(r, []MetadataFilter{
|
||||||
|
&ulidFilter{ulidToDelete: &ulidToDelete},
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
tcase.do(func() {
|
||||||
|
baseFetcher.cached.Clear()
|
||||||
|
testutil.Ok(t, os.RemoveAll(filepath.Join(dir, "meta-syncer")))
|
||||||
|
})
|
||||||
|
|
||||||
ulidToDelete = tcase.filterULID
|
ulidToDelete = tcase.filterULID
|
||||||
metas, partial, err := fetcher.Fetch(ctx)
|
metas, partial, err := fetcher.Fetch(ctx)
|
||||||
|
@ -282,8 +322,10 @@ func TestMetaFetcher_Fetch(t *testing.T) {
|
||||||
if tcase.expectedMetaErr != nil {
|
if tcase.expectedMetaErr != nil {
|
||||||
expectedFailures = 1
|
expectedFailures = 1
|
||||||
}
|
}
|
||||||
testutil.Equals(t, float64(i+1), promtest.ToFloat64(baseFetcher.syncs))
|
|
||||||
testutil.Equals(t, float64(i+1), promtest.ToFloat64(fetcher.metrics.Syncs))
|
testutil.Equals(t, float64(max(1, tcase.expectedSyncs)), promtest.ToFloat64(baseFetcher.syncs))
|
||||||
|
testutil.Equals(t, float64(tcase.expectedCacheBusts), promtest.ToFloat64(baseFetcher.cacheBusts))
|
||||||
|
testutil.Equals(t, float64(max(1, tcase.expectedSyncs)), promtest.ToFloat64(fetcher.metrics.Syncs))
|
||||||
testutil.Equals(t, float64(len(tcase.expectedMetas)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(LoadedMeta)))
|
testutil.Equals(t, float64(len(tcase.expectedMetas)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(LoadedMeta)))
|
||||||
testutil.Equals(t, float64(len(tcase.expectedNoMeta)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(NoMeta)))
|
testutil.Equals(t, float64(len(tcase.expectedNoMeta)), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues(NoMeta)))
|
||||||
testutil.Equals(t, float64(tcase.expectedFiltered), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues("filtered")))
|
testutil.Equals(t, float64(tcase.expectedFiltered), promtest.ToFloat64(fetcher.metrics.Synced.WithLabelValues("filtered")))
|
||||||
@@ -1212,3 +1254,157 @@ func Test_ParseRelabelConfig(t *testing.T) {
 testutil.NotOk(t, err)
 testutil.Equals(t, "unsupported relabel action: labelmap", err.Error())
 }
+
+func TestParquetMigratedMetaFilter_Filter(t *testing.T) {
+logger := log.NewNopLogger()
+filter := NewParquetMigratedMetaFilter(logger)
+
+// Simulate what might happen when extensions are loaded from JSON
+extensions := struct {
+ParquetMigrated bool `json:"parquet_migrated"`
+}{
+ParquetMigrated: true,
+}
+
+for _, c := range []struct {
+name string
+metas map[ulid.ULID]*metadata.Meta
+check func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error)
+}{
+{
+name: "block with other extensions",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(2, nil): {
+Thanos: metadata.Thanos{
+Extensions: map[string]interface{}{
+"other_key": "other_value",
+},
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Ok(t, err)
+testutil.Equals(t, 1, len(metas))
+},
+},
+{
+name: "no extensions",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(1, nil): {
+Thanos: metadata.Thanos{
+Extensions: nil,
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Equals(t, 1, len(metas))
+testutil.Ok(t, err)
+},
+},
+{
+name: "block with parquet_migrated=false",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(3, nil): {
+Thanos: metadata.Thanos{
+Extensions: map[string]interface{}{
+metadata.ParquetMigratedExtensionKey: false,
+},
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Equals(t, 1, len(metas))
+testutil.Ok(t, err)
+},
+},
+{
+name: "block with parquet_migrated=true",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(4, nil): {
+Thanos: metadata.Thanos{
+Extensions: map[string]interface{}{
+metadata.ParquetMigratedExtensionKey: true,
+},
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Equals(t, 0, len(metas))
+testutil.Ok(t, err)
+},
+},
+{
+name: "mixed blocks with parquet_migrated",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(5, nil): {
+Thanos: metadata.Thanos{
+Extensions: map[string]interface{}{
+metadata.ParquetMigratedExtensionKey: true,
+},
+},
+},
+ulid.MustNew(6, nil): {
+Thanos: metadata.Thanos{
+Extensions: map[string]interface{}{
+metadata.ParquetMigratedExtensionKey: false,
+},
+},
+},
+ulid.MustNew(7, nil): {
+Thanos: metadata.Thanos{
+Extensions: nil,
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Equals(t, 2, len(metas))
+testutil.Ok(t, err)
+testutil.Assert(t, metas[ulid.MustNew(6, nil)] != nil, "Expected block with parquet_migrated=false to remain")
+testutil.Assert(t, metas[ulid.MustNew(7, nil)] != nil, "Expected block without extensions to remain")
+},
+},
+{
+name: "block with serialized extensions",
+metas: map[ulid.ULID]*metadata.Meta{
+ulid.MustNew(8, nil): {
+Thanos: metadata.Thanos{
+Extensions: extensions,
+},
+},
+},
+check: func(t *testing.T, metas map[ulid.ULID]*metadata.Meta, err error) {
+testutil.Equals(t, 0, len(metas))
+testutil.Ok(t, err)
+},
+},
+} {
+t.Run(c.name, func(t *testing.T) {
+r := prometheus.NewRegistry()
+
+synced := promauto.With(r).NewGaugeVec(
+prometheus.GaugeOpts{
+Name: "test_synced",
+Help: "Test synced metric",
+},
+[]string{"state"},
+)
+modified := promauto.With(r).NewGaugeVec(
+prometheus.GaugeOpts{
+Name: "test_modified",
+Help: "Test modified metric",
+},
+[]string{"state"},
+)
+ctx := context.Background()
+
+m, err := json.Marshal(c.metas)
+testutil.Ok(t, err)
+
+var outmetas map[ulid.ULID]*metadata.Meta
+testutil.Ok(t, json.Unmarshal(m, &outmetas))
+
+err = filter.Filter(ctx, outmetas, synced, modified)
+c.check(t, outmetas, err)
+})
+}
+}
@@ -248,7 +248,7 @@ func GatherIndexHealthStats(ctx context.Context, logger log.Logger, fn string, m
 }
 stats.LabelNamesCount = int64(len(lnames))

-lvals, err := r.LabelValues(ctx, "__name__")
+lvals, err := r.LabelValues(ctx, "__name__", nil)
 if err != nil {
 return stats, errors.Wrap(err, "metric label values")
 }
@@ -307,7 +307,7 @@ func compareIndexToHeader(t *testing.T, indexByteSlice index.ByteSlice, headerRe
 minStart := int64(math.MaxInt64)
 maxEnd := int64(math.MinInt64)
 for il, lname := range expLabelNames {
-expectedLabelVals, err := indexReader.SortedLabelValues(ctx, lname)
+expectedLabelVals, err := indexReader.SortedLabelValues(ctx, lname, nil)
 testutil.Ok(t, err)

 vals, err := headerReader.LabelValues(lname)
@@ -7,11 +7,14 @@ import (
 "context"
 "sync"
 "time"
+"unsafe"

 "github.com/go-kit/log"
 "github.com/go-kit/log/level"
 "github.com/oklog/ulid/v2"

+xsync "golang.org/x/sync/singleflight"
+
 "github.com/pkg/errors"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/thanos-io/objstore"
@@ -49,6 +52,7 @@ type ReaderPool struct {
 // Keep track of all readers managed by the pool.
 lazyReadersMx sync.Mutex
 lazyReaders map[*LazyBinaryReader]struct{}
+lazyReadersSF xsync.Group

 lazyDownloadFunc LazyDownloadIndexHeaderFunc
 }
@@ -123,18 +127,16 @@ func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTime
 // with lazy reader enabled, this function will return a lazy reader. The returned lazy reader
 // is tracked by the pool and automatically closed once the idle timeout expires.
 func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int, meta *metadata.Meta) (Reader, error) {
-var reader Reader
-var err error
-
-if p.lazyReaderEnabled {
-reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta))
-} else {
-reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.binaryReader)
+if !p.lazyReaderEnabled {
+return NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.binaryReader)
 }

-if err != nil {
-return nil, err
-}
+idBytes := id.Bytes()
+lazyReader, err, _ := p.lazyReadersSF.Do(*(*string)(unsafe.Pointer(&idBytes)), func() (interface{}, error) {
+return NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta))
+})
+
+reader := lazyReader.(Reader)

 // Keep track of lazy readers only if required.
 if p.lazyReaderEnabled && p.lazyReaderIdleTimeout > 0 {
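The reader pool now deduplicates concurrent lazy-reader construction per block through golang.org/x/sync/singleflight; the `*(*string)(unsafe.Pointer(&idBytes))` expression is a zero-copy []byte-to-string conversion used only to form the map key. A minimal, runnable sketch of the singleflight pattern itself:

    package main

    import (
        "fmt"
        "sync"

        "golang.org/x/sync/singleflight"
    )

    func main() {
        var g singleflight.Group
        var wg sync.WaitGroup

        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                // All ten goroutines share the result of a single invocation
                // of the expensive function for the same in-flight key.
                v, _, shared := g.Do("block-ulid", func() (interface{}, error) {
                    return "expensive reader", nil
                })
                fmt.Println(v, shared)
            }()
        }
        wg.Wait()
    }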
@@ -6,12 +6,16 @@ package indexheader
 import (
 "context"
 "path/filepath"
+"sync"
 "testing"
 "time"

 "github.com/go-kit/log"
+"github.com/prometheus/client_golang/prometheus"
 promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
 "github.com/prometheus/prometheus/model/labels"
+"github.com/stretchr/testify/require"
+"github.com/thanos-io/objstore"
 "github.com/thanos-io/objstore/providers/filesystem"

 "github.com/efficientgo/core/testutil"
@@ -132,3 +136,60 @@ func TestReaderPool_ShouldCloseIdleLazyReaders(t *testing.T) {
 testutil.Equals(t, float64(2), promtestutil.ToFloat64(metrics.lazyReader.loadCount))
 testutil.Equals(t, float64(2), promtestutil.ToFloat64(metrics.lazyReader.unloadCount))
 }
+
+func TestReaderPool_MultipleReaders(t *testing.T) {
+ctx := context.Background()
+
+blkDir := t.TempDir()
+
+bkt := objstore.NewInMemBucket()
+b1, err := e2eutil.CreateBlock(ctx, blkDir, []labels.Labels{
+labels.New(labels.Label{Name: "a", Value: "1"}),
+labels.New(labels.Label{Name: "a", Value: "2"}),
+labels.New(labels.Label{Name: "a", Value: "3"}),
+labels.New(labels.Label{Name: "a", Value: "4"}),
+labels.New(labels.Label{Name: "b", Value: "1"}),
+}, 100, 0, 1000, labels.New(labels.Label{Name: "ext1", Value: "val1"}), 124, metadata.NoneFunc, nil)
+testutil.Ok(t, err)
+
+require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, filepath.Join(blkDir, b1.String()), metadata.NoneFunc))
+
+readerPool := NewReaderPool(
+log.NewNopLogger(),
+true,
+time.Minute,
+NewReaderPoolMetrics(prometheus.NewRegistry()),
+AlwaysEagerDownloadIndexHeader,
+)
+
+dlDir := t.TempDir()
+
+m, err := metadata.ReadFromDir(filepath.Join(blkDir, b1.String()))
+testutil.Ok(t, err)
+
+startWg := &sync.WaitGroup{}
+startWg.Add(1)
+
+waitWg := &sync.WaitGroup{}
+
+const readersCount = 10
+waitWg.Add(readersCount)
+for i := 0; i < readersCount; i++ {
+go func() {
+defer waitWg.Done()
+t.Logf("waiting")
+startWg.Wait()
+t.Logf("starting")
+
+br, err := readerPool.NewBinaryReader(ctx, log.NewNopLogger(), bkt, dlDir, b1, 32, m)
+testutil.Ok(t, err)
+
+t.Cleanup(func() {
+testutil.Ok(t, br.Close())
+})
+}()
+}
+
+startWg.Done()
+waitWg.Wait()
+}
@@ -53,6 +53,11 @@ const (
 TSDBVersion1 = 1
 // ThanosVersion1 is a enumeration of Thanos section of TSDB meta supported by Thanos.
 ThanosVersion1 = 1
+
+// ParquetMigratedExtensionKey is the key used in block extensions to indicate
+// that the block has been migrated to parquet format and can be safely ignored
+// by store gateways.
+ParquetMigratedExtensionKey = "parquet_migrated"
 )

 // Meta describes the a block's meta. It wraps the known TSDB meta structure and
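For illustration, a hypothetical migration tool could stamp this extension before re-uploading the block's meta.json; the function below is an assumption, but the field and key names follow the metadata package as shown in the diff:

    package migrate

    import "github.com/thanos-io/thanos/pkg/block/metadata" // assumed import path

    // MarkParquetMigrated sets the extension that ParquetMigratedMetaFilter
    // looks for; it serializes into meta.json as "parquet_migrated": true.
    func MarkParquetMigrated(meta *metadata.Meta) {
        meta.Thanos.Extensions = map[string]interface{}{
            metadata.ParquetMigratedExtensionKey: true,
        }
    }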
@@ -237,25 +237,25 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
 testutil.Assert(t, os.IsNotExist(err), "dir %s should be remove after compaction.", dir)

 // Test label name with slash, regression: https://github.com/thanos-io/thanos/issues/1661.
-extLabels := labels.Labels{{Name: "e1", Value: "1/weird"}}
-extLabels2 := labels.Labels{{Name: "e1", Value: "1"}}
+extLabels := labels.FromStrings("e1", "1/weird")
+extLabels2 := labels.FromStrings("e1", "1")
 metas := createAndUpload(t, bkt, []blockgenSpec{
 {
 numSamples: 100, mint: 500, maxt: 1000, extLset: extLabels, res: 124,
 series: []labels.Labels{
-{{Name: "a", Value: "1"}},
-{{Name: "a", Value: "2"}, {Name: "b", Value: "2"}},
-{{Name: "a", Value: "3"}},
-{{Name: "a", Value: "4"}},
+labels.FromStrings("a", "1"),
+labels.FromStrings("a", "2", "b", "2"),
+labels.FromStrings("a", "3"),
+labels.FromStrings("a", "4"),
 },
 },
 {
 numSamples: 100, mint: 2000, maxt: 3000, extLset: extLabels, res: 124,
 series: []labels.Labels{
-{{Name: "a", Value: "3"}},
-{{Name: "a", Value: "4"}},
-{{Name: "a", Value: "5"}},
-{{Name: "a", Value: "6"}},
+labels.FromStrings("a", "3"),
+labels.FromStrings("a", "4"),
+labels.FromStrings("a", "5"),
+labels.FromStrings("a", "6"),
 },
 },
 // Mix order to make sure compactor is able to deduct min time / max time.
@@ -268,48 +268,40 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
 // Due to TSDB compaction delay (not compacting fresh block), we need one more block to be pushed to trigger compaction.
 {
 numSamples: 100, mint: 3000, maxt: 4000, extLset: extLabels, res: 124,
-series: []labels.Labels{
-{{Name: "a", Value: "7"}},
-},
+series: []labels.Labels{labels.FromStrings("a", "7")},
 },
 // Extra block for "distraction" for different resolution and one for different labels.
 {
-numSamples: 100, mint: 5000, maxt: 6000, extLset: labels.Labels{{Name: "e1", Value: "2"}}, res: 124,
-series: []labels.Labels{
-{{Name: "a", Value: "7"}},
-},
+numSamples: 100, mint: 5000, maxt: 6000, extLset: labels.FromStrings("e1", "2"), res: 124,
+series: []labels.Labels{labels.FromStrings("a", "7")},
 },
 // Extra block for "distraction" for different resolution and one for different labels.
 {
 numSamples: 100, mint: 4000, maxt: 5000, extLset: extLabels, res: 0,
-series: []labels.Labels{
-{{Name: "a", Value: "7"}},
-},
+series: []labels.Labels{labels.FromStrings("a", "7")},
 },
 // Second group (extLabels2).
 {
 numSamples: 100, mint: 2000, maxt: 3000, extLset: extLabels2, res: 124,
 series: []labels.Labels{
-{{Name: "a", Value: "3"}},
-{{Name: "a", Value: "4"}},
-{{Name: "a", Value: "6"}},
+labels.FromStrings("a", "3"),
+labels.FromStrings("a", "4"),
+labels.FromStrings("a", "6"),
 },
 },
 {
 numSamples: 100, mint: 0, maxt: 1000, extLset: extLabels2, res: 124,
 series: []labels.Labels{
-{{Name: "a", Value: "1"}},
-{{Name: "a", Value: "2"}, {Name: "b", Value: "2"}},
-{{Name: "a", Value: "3"}},
-{{Name: "a", Value: "4"}},
+labels.FromStrings("a", "1"),
+labels.FromStrings("a", "2", "b", "2"),
+labels.FromStrings("a", "3"),
+labels.FromStrings("a", "4"),
 },
 },
 // Due to TSDB compaction delay (not compacting fresh block), we need one more block to be pushed to trigger compaction.
 {
 numSamples: 100, mint: 3000, maxt: 4000, extLset: extLabels2, res: 124,
-series: []labels.Labels{
-{{Name: "a", Value: "7"}},
-},
+series: []labels.Labels{labels.FromStrings("a", "7")},
 },
 })
@@ -2192,10 +2192,10 @@ func (b *memBlock) addSeries(s *series) {
 b.postings = append(b.postings, sid)
 b.series = append(b.series, s)

-for _, l := range s.lset {
+s.lset.Range(func(l labels.Label) {
 b.symbols[l.Name] = struct{}{}
 b.symbols[l.Value] = struct{}{}
-}
+})

 for i, cm := range s.chunks {
 if b.minTime == -1 || cm.MinTime < b.minTime {
@@ -0,0 +1,14 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package compressutil
+
+import "github.com/prometheus/prometheus/util/compression"
+
+// ParseCompressionType parses the two compression-related configuration values and returns the CompressionType.
+func ParseCompressionType(compress bool, compressType compression.Type) compression.Type {
+if compress {
+return compressType
+}
+return compression.None
+}
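Usage sketch for the new helper: it folds a legacy on/off flag and a typed algorithm choice into a single value. Both the compressutil import path and compression.Snappy are assumptions about exported names, not confirmed by this diff:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/util/compression"

        "github.com/thanos-io/thanos/pkg/compressutil" // assumed import path for the new package
    )

    func main() {
        // Enabled: the configured algorithm wins.
        fmt.Println(compressutil.ParseCompressionType(true, compression.Snappy))
        // Disabled: always compression.None, whatever type was configured.
        fmt.Println(compressutil.ParseCompressionType(false, compression.Snappy))
    }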
@@ -24,8 +24,9 @@ import (

 // EndpointGroupGRPCOpts creates gRPC dial options for connecting to endpoint groups.
 // For details on retry capabilities, see https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy-capabilities
-func EndpointGroupGRPCOpts() []grpc.DialOption {
-serviceConfig := `
+func EndpointGroupGRPCOpts(serviceConfig string) []grpc.DialOption {
+if serviceConfig == "" {
+serviceConfig = `
 {
 "loadBalancingPolicy":"round_robin",
 "retryPolicy": {
@@ -37,6 +38,7 @@ func EndpointGroupGRPCOpts() []grpc.DialOption {
 ]
 }
 }`
+}

 return []grpc.DialOption{
 grpc.WithDefaultServiceConfig(serviceConfig),
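The signature change makes the gRPC service config injectable while an empty string keeps the previous built-in round-robin/retry default. A runnable sketch; the local endpointGroupGRPCOpts stands in for the patched function, and the target address is made up:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    // endpointGroupGRPCOpts mimics the patched EndpointGroupGRPCOpts: an empty
    // serviceConfig falls back to a default (abbreviated here).
    func endpointGroupGRPCOpts(serviceConfig string) []grpc.DialOption {
        if serviceConfig == "" {
            serviceConfig = `{"loadBalancingPolicy":"round_robin"}`
        }
        return []grpc.DialOption{grpc.WithDefaultServiceConfig(serviceConfig)}
    }

    func main() {
        opts := append(
            endpointGroupGRPCOpts(`{"loadBalancingPolicy":"pick_first"}`),
            grpc.WithTransportCredentials(insecure.NewCredentials()),
        )
        conn, err := grpc.NewClient("dns:///store-group.example:10901", opts...)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }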
@@ -483,14 +483,18 @@ func sortDtoMessages(msgs []proto.Message) {
 		m1 := msgs[i].(*dto.Metric)
 		m2 := msgs[j].(*dto.Metric)
 
-		lbls1 := labels.Labels{}
+		builder := labels.NewBuilder(labels.EmptyLabels())
 		for _, p := range m1.GetLabel() {
-			lbls1 = append(lbls1, labels.Label{Name: *p.Name, Value: *p.Value})
+			builder.Set(p.GetName(), p.GetValue())
 		}
-		lbls2 := labels.Labels{}
+		lbls1 := builder.Labels()
+
+		builder.Reset(labels.EmptyLabels())
+
 		for _, p := range m2.GetLabel() {
-			lbls2 = append(lbls2, labels.Label{Name: *p.Name, Value: *p.Value})
+			builder.Set(p.GetName(), p.GetValue())
 		}
+		lbls2 := builder.Labels()
 
 		return labels.Compare(lbls1, lbls2) < 0
 	})
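`labels.NewBuilder` replaces appending `labels.Label` values to a slice: `Labels()` returns a sorted, de-duplicated set, and `Reset` lets one builder serve both metrics being compared. A small standalone illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	builder := labels.NewBuilder(labels.EmptyLabels())
	// Set can be called in any order; Labels() returns a sorted set,
	// so no manual sort.Sort is needed afterwards.
	builder.Set("zone", "eu-1")
	builder.Set("app", "thanos")
	fmt.Println(builder.Labels()) // {app="thanos", zone="eu-1"}

	// Reset reuses the same builder for the next label set.
	builder.Reset(labels.EmptyLabels())
	builder.Set("app", "prometheus")
	fmt.Println(builder.Labels())
}
```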
@@ -11,16 +11,16 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 
-	"github.com/thanos-io/promql-engine/execution/function"
+	"github.com/thanos-io/promql-engine/execution/parse"
 )
 
 // ParseExpr parses the input PromQL expression and returns the parsed representation.
 func ParseExpr(input string) (parser.Expr, error) {
-	allFuncs := make(map[string]*parser.Function, len(function.XFunctions)+len(parser.Functions))
+	allFuncs := make(map[string]*parser.Function, len(parse.XFunctions)+len(parser.Functions))
 	for k, v := range parser.Functions {
 		allFuncs[k] = v
 	}
-	for k, v := range function.XFunctions {
+	for k, v := range parse.XFunctions {
 		allFuncs[k] = v
 	}
 	p := parser.NewParser(input, parser.WithFunctions(allFuncs))
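Only the import moved (`execution/function` became `execution/parse` in thanos-io/promql-engine); behavior is unchanged. A usage sketch, assuming the helper lives in `pkg/extpromql` and that `xrate` is among the engine's registered XFunctions — both are assumptions, not shown in the hunk:

```go
package main

import (
	"fmt"
	"log"

	"github.com/thanos-io/thanos/pkg/extpromql" // assumed package path
)

func main() {
	// Plain PromQL parses as usual.
	expr, err := extpromql.ParseExpr(`sum(rate(http_requests_total[5m]))`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(expr.String())

	// Extended engine functions registered via parse.XFunctions (e.g. an
	// assumed xrate) also parse; vanilla parser.ParseExpr would reject
	// them as unknown functions.
	if _, err := extpromql.ParseExpr(`xrate(http_requests_total[5m])`); err != nil {
		log.Fatal(err)
	}
}
```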
@@ -703,10 +703,8 @@ func TestEndpointSetUpdate_AvailabilityScenarios(t *testing.T) {
 
 	lset := e.LabelSets()
 	testutil.Equals(t, 2, len(lset))
-	testutil.Equals(t, "addr", lset[0][0].Name)
-	testutil.Equals(t, addr, lset[0][0].Value)
-	testutil.Equals(t, "a", lset[1][0].Name)
-	testutil.Equals(t, "b", lset[1][0].Value)
+	testutil.Equals(t, addr, lset[0].Get("addr"))
+	testutil.Equals(t, "b", lset[1].Get("a"))
 	assertRegisteredAPIs(t, endpoints.exposedAPIs[addr], e)
 }
 
@@ -738,10 +736,8 @@ func TestEndpointSetUpdate_AvailabilityScenarios(t *testing.T) {
 
 	lset := st.LabelSets()
 	testutil.Equals(t, 2, len(lset))
-	testutil.Equals(t, "addr", lset[0][0].Name)
-	testutil.Equals(t, addr, lset[0][0].Value)
-	testutil.Equals(t, "a", lset[1][0].Name)
-	testutil.Equals(t, "b", lset[1][0].Value)
+	testutil.Equals(t, addr, lset[0].Get("addr"))
+	testutil.Equals(t, "b", lset[1].Get("a"))
 	testutil.Equals(t, expected, endpointSet.endpointsMetric.storeNodes)
 
 	// New big batch of endpoints.
@@ -18,7 +18,6 @@ import (
 
 	"github.com/efficientgo/core/testutil"
 	"github.com/go-kit/log"
-	"github.com/google/go-cmp/cmp"
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/histogram"

@@ -362,7 +361,7 @@ func TestQuerier_Select_AfterPromQL(t *testing.T) {
 			// Regression test 1 against https://github.com/thanos-io/thanos/issues/2890.
 			name: "when switching replicas don't miss samples when set with a big enough lookback delta",
 			storeAPI: newProxyStore(func() storepb.StoreServer {
-				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, nil, "./testdata/issue2890-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
+				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, labels.EmptyLabels(), "./testdata/issue2890-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
 				testutil.Ok(t, err)
 				return s
 			}()),

@@ -488,7 +487,7 @@ func TestQuerier_Select(t *testing.T) {
 				},
 			},
 			expectedAfterDedup: []series{{
-				lset: nil,
+				lset: labels.EmptyLabels(),
 				// We don't expect correctness here, it's just random non-replica data.
 				samples: []sample{{1, 1}, {2, 2}, {3, 3}, {5, 5}, {6, 6}, {7, 7}},
 			}},

@@ -497,7 +496,7 @@ func TestQuerier_Select(t *testing.T) {
 		{
 			name: "realistic data with stale marker",
 			storeEndpoints: []storepb.StoreServer{func() storepb.StoreServer {
-				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, nil, "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
+				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, labels.EmptyLabels(), "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
 				testutil.Ok(t, err)
 				return s
 			}()},

@@ -541,7 +540,7 @@ func TestQuerier_Select(t *testing.T) {
 		{
 			name: "realistic data with stale marker with 100000 step",
 			storeEndpoints: []storepb.StoreServer{func() storepb.StoreServer {
-				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, nil, "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
+				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, labels.EmptyLabels(), "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
 				testutil.Ok(t, err)
 				return s
 			}()},

@@ -592,7 +591,7 @@ func TestQuerier_Select(t *testing.T) {
 			// Thanks to @Superq and GitLab for real data reproducing this.
 			name: "realistic data with stale marker with hints rate function",
 			storeEndpoints: []storepb.StoreServer{func() storepb.StoreServer {
-				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, nil, "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
+				s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, labels.EmptyLabels(), "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
 				testutil.Ok(t, err)
 				return s
 			}()},
@@ -860,19 +859,12 @@ func newProxyStore(storeAPIs ...storepb.StoreServer) *store.ProxyStore {
 		nil,
 		func() []store.Client { return cls },
 		component.Query,
-		nil,
+		labels.EmptyLabels(),
 		0,
 		store.EagerRetrieval,
 	)
 }
 
-var emptyLabelsSameAsNotAllocatedLabels = cmp.Transformer("", func(l labels.Labels) labels.Labels {
-	if len(l) == 0 {
-		return labels.Labels(nil)
-	}
-	return l
-})
-
 func testSelectResponse(t *testing.T, expected []series, res storage.SeriesSet) {
 	var series []storage.Series
 	// Use it as PromQL would do, first gather all series.

@@ -889,7 +881,7 @@ func testSelectResponse(t *testing.T, expected []series, res storage.SeriesSet)
 	}())
 
 	for i, s := range series {
-		testutil.WithGoCmp(emptyLabelsSameAsNotAllocatedLabels).Equals(t, expected[i].lset, s.Labels())
+		testutil.Assert(t, labels.Equal(expected[i].Labels(), s.Labels()))
 		samples := expandSeries(t, s.Iterator(nil))
 		expectedCpy := make([]sample, 0, len(expected[i].samples))
 		for _, s := range expected[i].samples {

@@ -914,15 +906,10 @@ func jsonToSeries(t *testing.T, filename string) []series {
 
 	var ss []series
 	for _, ser := range data.Data.Results {
-		var lbls labels.Labels
+		builder := labels.NewBuilder(labels.EmptyLabels())
 		for n, v := range ser.Metric {
-			lbls = append(lbls, labels.Label{
-				Name:  string(n),
-				Value: string(v),
-			})
+			builder.Set(string(n), string(v))
 		}
-		// Label names need to be sorted.
-		sort.Sort(lbls)
 
 		var smpls []sample
 		for _, smp := range ser.Values {

@@ -933,7 +920,7 @@ func jsonToSeries(t *testing.T, filename string) []series {
 		}
 
 		ss = append(ss, series{
-			lset:    lbls,
+			lset:    builder.Labels(),
 			samples: smpls,
 		})
 	}

@@ -1073,7 +1060,7 @@ func TestQuerierWithDedupUnderstoodByPromQL_Rate(t *testing.T) {
 
 	logger := log.NewLogfmtLogger(os.Stderr)
 
-	s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, nil, "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
+	s, err := store.NewLocalStoreFromJSONMmappableFile(logger, component.Debug, labels.EmptyLabels(), "./testdata/issue2401-seriesresponses.json", store.ScanGRPCCurlProtoStreamMessages)
 	testutil.Ok(t, err)
 
 	t.Run("dedup=false", func(t *testing.T) {

@@ -1260,9 +1247,9 @@ func (s *testStoreServer) Series(r *storepb.SeriesRequest, srv storepb.Store_Ser
 func storeSeriesResponse(t testing.TB, lset labels.Labels, smplChunks ...[]sample) *storepb.SeriesResponse {
 	var s storepb.Series
 
-	for _, l := range lset {
+	lset.Range(func(l labels.Label) {
 		s.Labels = append(s.Labels, labelpb.ZLabel{Name: l.Name, Value: l.Value})
-	}
+	})
 
 	for _, smpls := range smplChunks {
 		c := chunkenc.NewXORChunk()
@@ -75,7 +75,7 @@ func benchQuerySelect(t testutil.TB, totalSamples, totalSeries int, dedup bool)
 			if !dedup || j == 0 {
 				lset := labelpb.ZLabelsToPromLabels(created[i].Labels).Copy()
 				if dedup {
-					lset = lset[1:]
+					lset = lset.MatchLabels(false, "a_replica")
 				}
 				expectedSeries = append(expectedSeries, lset)
 			}
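`lset[1:]` relied on `labels.Labels` being a slice with the replica label sorted first; `MatchLabels(false, names...)` states the intent directly and works with the opaque labels representation. A standalone sketch — note that with `on=false` the method also excludes `__name__`, which doesn't matter here since external label sets carry no metric name:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lset := labels.FromStrings("a_replica", "0", "cluster", "eu", "job", "api")

	// MatchLabels(false, names...) returns a copy without the named
	// labels, replacing the old slice trick lset[1:].
	deduped := lset.MatchLabels(false, "a_replica")
	fmt.Println(deduped) // {cluster="eu", job="api"}
}
```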
@@ -13,6 +13,7 @@ import (
 
 	"github.com/efficientgo/core/testutil"
 	"github.com/go-kit/log"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
 

@@ -71,7 +72,7 @@ func TestQuerier_Proxy(t *testing.T) {
 		logger,
 		nil,
 		store.NewProxyStore(logger, nil, func() []store.Client { return sc.get() },
-			component.Debug, nil, 5*time.Minute, store.EagerRetrieval, store.WithMatcherCache(cache)),
+			component.Debug, labels.EmptyLabels(), 5*time.Minute, store.EagerRetrieval, store.WithMatcherCache(cache)),
 		1000000,
 		5*time.Minute,
 		dedup.AlgorithmPenalty,

@@ -85,7 +86,7 @@ func TestQuerier_Proxy(t *testing.T) {
 		// TODO(bwplotka): Parse external labels.
 		sc.append(&storetestutil.TestClient{
 			Name: fmt.Sprintf("store number %v", i),
-			StoreClient: storepb.ServerAsClient(selectedStore(store.NewTSDBStore(logger, st.storage.DB, component.Debug, nil), m, st.mint, st.maxt)),
+			StoreClient: storepb.ServerAsClient(selectedStore(store.NewTSDBStore(logger, st.storage.DB, component.Debug, labels.EmptyLabels()), m, st.mint, st.maxt)),
 			MinTime: st.mint,
 			MaxTime: st.maxt,
 		})
@@ -183,6 +183,13 @@ func (r *remoteEngine) MaxT() int64 {
 	return r.maxt
 }
 
+func (r *remoteEngine) PartitionLabelSets() []labels.Labels {
+	r.labelSetsOnce.Do(func() {
+		r.labelSets = r.adjustedInfos().LabelSets()
+	})
+	return r.labelSets
+}
+
 func (r *remoteEngine) LabelSets() []labels.Labels {
 	r.labelSetsOnce.Do(func() {
 		r.labelSets = r.adjustedInfos().LabelSets()
@@ -39,10 +39,11 @@ func TestRemoteEngine_Warnings(t *testing.T) {
 	qryExpr, err := extpromql.ParseExpr("up")
 	testutil.Ok(t, err)
 
-	plan := logicalplan.NewFromAST(qryExpr, &query.Options{
+	plan, err := logicalplan.NewFromAST(qryExpr, &query.Options{
 		Start: time.Now(),
 		End:   time.Now().Add(2 * time.Hour),
 	}, logicalplan.PlanOptions{})
+	testutil.Ok(t, err)
 
 	t.Run("instant_query", func(t *testing.T) {
 		qry, err := engine.NewInstantQuery(context.Background(), nil, plan.Root(), start)

@@ -77,10 +78,11 @@ func TestRemoteEngine_PartialResponse(t *testing.T) {
 	qryExpr, err := extpromql.ParseExpr("up")
 	testutil.Ok(t, err)
 
-	plan := logicalplan.NewFromAST(qryExpr, &query.Options{
+	plan, err := logicalplan.NewFromAST(qryExpr, &query.Options{
 		Start: time.Now(),
 		End:   time.Now().Add(2 * time.Hour),
 	}, logicalplan.PlanOptions{})
+	testutil.Ok(t, err)
 
 	t.Run("instant_query", func(t *testing.T) {
 		qry, err := engine.NewInstantQuery(context.Background(), nil, plan.Root(), start)
@@ -358,7 +358,7 @@ func ParseEval(lines []string, i int) (int, *evalCmd, error) {
 			break
 		}
 		if f, err := parseNumber(defLine); err == nil {
-			cmd.expect(0, nil, parser.SequenceValue{Value: f})
+			cmd.expect(0, parser.SequenceValue{Value: f})
 			break
 		}
 		metric, vals, err := parser.ParseSeriesDesc(defLine)

@@ -373,7 +373,7 @@ func ParseEval(lines []string, i int) (int, *evalCmd, error) {
 		if len(vals) > 1 {
 			return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
 		}
-		cmd.expect(j, metric, vals...)
+		cmd.expectMetric(j, metric, vals...)
 	}
 	return i, cmd, nil
 }

@@ -480,13 +480,15 @@ func (ev *evalCmd) String() string {
 	return "eval"
 }
 
-// expect adds a new metric with a sequence of values to the set of expected
+// expect adds a sequence of values to the set of expected
 // results for the query.
-func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) {
-	if m == nil {
-		ev.expected[0] = entry{pos: pos, vals: vals}
-		return
-	}
+func (ev *evalCmd) expect(pos int, vals ...parser.SequenceValue) {
+	ev.expected[0] = entry{pos: pos, vals: vals}
+}
+
+// expectMetric adds a new metric with a sequence of values to the set of expected
+// results for the query.
+func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) {
 	h := m.Hash()
 	ev.metrics[h] = m
 	ev.expected[h] = entry{pos: pos, vals: vals}
@@ -8,7 +8,6 @@ import (
 	"context"
 	"encoding/json"
 	io "io"
-	"math"
 	"net/http"
 	"net/url"
 	"sort"

@@ -19,6 +18,7 @@ import (
 	"github.com/opentracing/opentracing-go"
 	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/prometheus/prometheus/model/timestamp"
+	v1 "github.com/prometheus/prometheus/web/api/v1"
 	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/thanos-io/thanos/internal/cortex/querier/queryrange"

@@ -28,11 +28,6 @@ import (
 	"github.com/thanos-io/thanos/pkg/store/labelpb"
 )
 
-var (
-	infMinTime = time.Unix(math.MinInt64/1000+62135596801, 0)
-	infMaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999)
-)
-
 // labelsCodec is used to encode/decode Thanos labels and series requests and responses.
 type labelsCodec struct {
 	partialResponse bool

@@ -400,8 +395,8 @@ func parseMetadataTimeRange(r *http.Request, defaultMetadataTimeRange time.Durat
 	// If start and end time not specified as query parameter, we get the range from the beginning of time by default.
 	var defaultStartTime, defaultEndTime time.Time
 	if defaultMetadataTimeRange == 0 {
-		defaultStartTime = infMinTime
-		defaultEndTime = infMaxTime
+		defaultStartTime = v1.MinTime
+		defaultEndTime = v1.MaxTime
 	} else {
 		now := time.Now()
 		defaultStartTime = now.Add(-defaultMetadataTimeRange)
@@ -68,7 +68,7 @@ func newBlockBaseQuerier(b prom_tsdb.BlockReader, mint, maxt int64) (*blockBaseQ
 }
 
 func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-	res, err := q.index.SortedLabelValues(ctx, name, matchers...)
+	res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...)
 	return res, nil, err
 }
 
@@ -145,12 +145,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 	})
 	sort.Stable(ByLabelName(labels))
 
+	labelNamer := prometheustranslator.LabelNamer{}
 	// map ensures no duplicate label names.
 	l := make(map[string]string, maxLabelCount)
 	for _, label := range labels {
 		finalKey := label.Name
 		if !settings.AllowUTF8 {
-			finalKey = prometheustranslator.NormalizeLabel(finalKey)
+			finalKey = labelNamer.Build(finalKey)
 		}
 		if existingValue, alreadyExists := l[finalKey]; alreadyExists {
 			l[finalKey] = existingValue + ";" + label.Value

@@ -162,7 +163,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 	for _, lbl := range promotedAttrs {
 		normalized := lbl.Name
 		if !settings.AllowUTF8 {
-			normalized = prometheustranslator.NormalizeLabel(normalized)
+			normalized = labelNamer.Build(normalized)
 		}
 		if _, exists := l[normalized]; !exists {
 			l[normalized] = lbl.Value

@@ -202,7 +203,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 		}
 		// internal labels should be maintained
 		if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
-			name = prometheustranslator.NormalizeLabel(name)
+			name = labelNamer.Build(name)
 		}
 		l[name] = extras[i+1]
 	}
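`NormalizeLabel` was replaced by the reusable `LabelNamer` type, which is why the patch constructs it once before the loops instead of calling a package-level function per attribute. A sketch, assuming the `prometheustranslator` alias points at `github.com/prometheus/otlptranslator` and that `Build` sanitizes invalid characters the same way the old helper did — both assumptions, since the hunk does not show the import block:

```go
package main

import (
	"fmt"

	prometheustranslator "github.com/prometheus/otlptranslator" // assumed import path
)

func main() {
	// One LabelNamer is reused for every attribute.
	labelNamer := prometheustranslator.LabelNamer{}
	for _, attr := range []string{"http.method", "k8s.pod.name"} {
		// Build replaces characters that are invalid in Prometheus label
		// names (assumed behavior, mirroring the removed NormalizeLabel).
		fmt.Println(labelNamer.Build(attr))
	}
}
```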
@@ -1236,3 +1236,59 @@ func TestProxyStoreWithReplicas_Acceptance(t *testing.T) {
 
 	testStoreAPIsAcceptance(t, startStore)
 }
+
+// TestTSDBSelectorFilteringBehavior tests that TSDBSelector properly filters stores
+// based on relabel configuration, ensuring that only matching stores are included
+// in TSDBInfos, LabelValues, and other metadata operations.
+func TestTSDBSelectorFilteringBehavior(t *testing.T) {
+	t.Parallel()
+
+	startStore := func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer {
+		startNestedStore := func(tt *testing.T, extLset labels.Labels, appendFn func(app storage.Appender)) storepb.StoreServer {
+			db, err := e2eutil.NewTSDB()
+			testutil.Ok(tt, err)
+			tt.Cleanup(func() { testutil.Ok(tt, db.Close()) })
+			appendFn(db.Appender(context.Background()))
+
+			return NewTSDBStore(nil, db, component.Rule, extLset)
+		}
+
+		// this TSDB will be selected
+		store1 := startNestedStore(tt, extLset, appendFn)
+
+		// this TSDB should be dropped
+		droppedLset := labels.New(labels.Label{Name: "foo", Value: "bar"})
+		store2 := startNestedStore(tt, droppedLset, appendFn)
+
+		clients := []Client{
+			storetestutil.TestClient{
+				StoreClient: storepb.ServerAsClient(store1),
+				ExtLset:     []labels.Labels{extLset},
+			},
+			storetestutil.TestClient{
+				StoreClient: storepb.ServerAsClient(store2),
+				ExtLset:     []labels.Labels{droppedLset},
+			},
+		}
+
+		// Create relabel config to keep only the labels in the extLset
+		relabelCfgs := []*relabel.Config{{
+			SourceLabels: []model.LabelName{"foo"},
+			Regex:        relabel.MustNewRegexp("bar"),
+			Action:       relabel.Drop,
+		}}
+
+		return NewProxyStore(
+			nil, nil,
+			func() []Client { return clients },
+			component.Query,
+			labels.EmptyLabels(),
+			0*time.Second,
+			RetrievalStrategy(EagerRetrieval),
+			WithTSDBSelector(NewTSDBSelector(relabelCfgs)),
+		)
+	}
+
+	testStoreAPIsAcceptance(t, startStore)
+
+}
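The relabel rule in this test is the standard Prometheus `Drop` action: a store is kept unless its external labels match the rule. A standalone sketch of that primitive (that `TSDBSelector` is built on `relabel.Process` is an inference from this test, not shown in the hunk):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// Same shape as the test: drop any label set carrying foo="bar".
	cfgs := []*relabel.Config{{
		SourceLabels: []model.LabelName{"foo"},
		Regex:        relabel.MustNewRegexp("bar"),
		Action:       relabel.Drop,
	}}

	for _, ls := range []labels.Labels{
		labels.FromStrings("region", "eu"), // kept
		labels.FromStrings("foo", "bar"),   // dropped
	} {
		// Process reports keep=false when a Drop rule matches.
		_, keep := relabel.Process(ls, cfgs...)
		fmt.Println(ls, "keep:", keep)
	}
}
```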
@@ -28,12 +28,12 @@ var (
 	sep = []byte{'\xff'}
 )
 
-func noAllocString(buf []byte) string {
-	return *(*string)(unsafe.Pointer(&buf))
+func safeBytes(buf string) []byte {
+	return []byte(buf)
 }
 
-func noAllocBytes(buf string) []byte {
-	return *(*[]byte)(unsafe.Pointer(&buf))
+func safeString(buf []byte) string {
+	return string(buf)
 }
 
 // ZLabelsFromPromLabels converts Prometheus labels to slice of labelpb.ZLabel in type unsafe manner.

@@ -65,8 +65,8 @@ func ReAllocZLabelsStrings(lset *[]ZLabel, intern bool) {
 	}
 
 	for j, l := range *lset {
-		(*lset)[j].Name = string(noAllocBytes(l.Name))
-		(*lset)[j].Value = string(noAllocBytes(l.Value))
+		(*lset)[j].Name = string(safeBytes(l.Name))
+		(*lset)[j].Value = string(safeBytes(l.Value))
 	}
 }
 

@@ -80,7 +80,7 @@ func internLabelString(s string) string {
 // detachAndInternLabelString reallocates the label string to detach it
 // from a bigger memory pool and interns the string.
 func detachAndInternLabelString(s string) string {
-	return internLabelString(string(noAllocBytes(s)))
+	return internLabelString(string(safeBytes(s)))
 }
 
 // ZLabelSetsToPromLabelSets converts slice of labelpb.ZLabelSet to slice of Prometheus labels.

@@ -191,7 +191,7 @@ func (m *ZLabel) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = noAllocString(data[iNdEx:postIndex])
+			m.Name = safeString(data[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {

@@ -223,7 +223,7 @@ func (m *ZLabel) Unmarshal(data []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Value = noAllocString(data[iNdEx:postIndex])
+			m.Value = safeString(data[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex

@@ -333,8 +333,8 @@ func (m *ZLabelSet) PromLabels() labels.Labels {
 func DeepCopy(lbls []ZLabel) []ZLabel {
 	ret := make([]ZLabel, len(lbls))
 	for i := range lbls {
-		ret[i].Name = string(noAllocBytes(lbls[i].Name))
-		ret[i].Value = string(noAllocBytes(lbls[i].Value))
+		ret[i].Name = string(safeBytes(lbls[i].Name))
+		ret[i].Value = string(safeBytes(lbls[i].Value))
 	}
 	return ret
 }
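The removed `noAllocString`/`noAllocBytes` helpers used `unsafe` casts that alias the caller's buffer, so a reused unmarshal buffer could mutate label strings after the fact; the `safe*` replacements copy instead. A self-contained demonstration of the hazard being traded away for an allocation:

```go
package main

import (
	"fmt"
	"unsafe"
)

// unsafeString mirrors the removed noAllocString: the returned string
// aliases buf, so later writes to buf silently change the "immutable" string.
func unsafeString(buf []byte) string {
	return *(*string)(unsafe.Pointer(&buf))
}

// safeString copies, matching the patched helper.
func safeString(buf []byte) string {
	return string(buf)
}

func main() {
	buf := []byte("name")
	aliased := unsafeString(buf)
	copied := safeString(buf)

	buf[0] = 'X' // e.g. a reused gRPC unmarshal buffer being overwritten
	fmt.Println(aliased) // "Xame" — mutated behind the string's back
	fmt.Println(copied)  // "name" — unaffected
}
```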
@@ -1,8 +1,6 @@
 // Copyright (c) The Thanos Authors.
 // Licensed under the Apache License 2.0.
 
-//go:build !stringlabels
-
 package labelpb
 
 import (
@@ -180,7 +180,7 @@ func allPostings(ctx context.Context, t testing.TB, ix tsdb.IndexReader) index.P
 }
 
 func matchPostings(ctx context.Context, t testing.TB, ix tsdb.IndexReader, m *labels.Matcher) index.Postings {
-	vals, err := ix.LabelValues(ctx, m.Name)
+	vals, err := ix.LabelValues(ctx, m.Name, nil)
 	testutil.Ok(t, err)
 
 	matching := []string(nil)
@@ -195,14 +195,31 @@ func NewProxyStore(
 }
 
 func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet {
-	stores := s.stores()
+	stores := s.storesForTSDBSelector()
 	if len(stores) == 0 {
+		// We always want to enforce announcing the subset of data that
+		// selector-labels represents. If no stores match the filter,
+		// we still want to enforce announcing this subset.
+		selectorLabels := labelpb.ZLabelsFromPromLabels(s.selectorLabels)
+		if len(selectorLabels) > 0 {
+			return []labelpb.ZLabelSet{{Labels: selectorLabels}}
+		}
 		return []labelpb.ZLabelSet{}
 	}
 
 	mergedLabelSets := make(map[uint64]labelpb.ZLabelSet, len(stores))
 	for _, st := range stores {
-		for _, lset := range st.LabelSets() {
+		// Get filtered label sets from TSDBSelector
+		_, filteredLabelSets := s.tsdbSelector.MatchLabelSets(st.LabelSets()...)
+
+		var labelSetsToProcess []labels.Labels
+		if filteredLabelSets != nil {
+			labelSetsToProcess = filteredLabelSets
+		} else {
+			labelSetsToProcess = st.LabelSets()
+		}
+
+		for _, lset := range labelSetsToProcess {
 			mergedLabelSet := labelpb.ExtendSortedLabels(lset, s.selectorLabels)
 			mergedLabelSets[mergedLabelSet.Hash()] = labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(mergedLabelSet)}
 		}

@@ -226,14 +243,14 @@ func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet {
 }
 
 func (s *ProxyStore) TimeRange() (int64, int64) {
-	stores := s.stores()
+	stores := s.storesForTSDBSelector()
 	if len(stores) == 0 {
 		return math.MinInt64, math.MaxInt64
 	}
 
 	var minTime, maxTime int64 = math.MaxInt64, math.MinInt64
-	for _, s := range stores {
-		storeMinTime, storeMaxTime := s.TimeRange()
+	for _, st := range stores {
+		storeMinTime, storeMaxTime := st.TimeRange()
 		if storeMinTime < minTime {
 			minTime = storeMinTime
 		}

@@ -246,12 +263,9 @@ func (s *ProxyStore) TimeRange() (int64, int64) {
 }
 
 func (s *ProxyStore) TSDBInfos() []infopb.TSDBInfo {
+	stores := s.storesForTSDBSelector()
 	infos := make([]infopb.TSDBInfo, 0)
-	for _, st := range s.stores() {
-		matches, _ := s.tsdbSelector.MatchLabelSets(st.LabelSets()...)
-		if !matches {
-			continue
-		}
+	for _, st := range stores {
 		infos = append(infos, st.TSDBInfos()...)
 	}
 	return infos

@@ -572,6 +586,20 @@ func storeInfo(st Client) (storeID string, storeAddr string, isLocalStore bool)
 	return storeID, storeAddr, isLocalStore
 }
 
+// storesForTSDBSelector returns stores that match the TSDBSelector filtering criteria.
+// This centralizes the TSDBSelector filtering logic used across all ProxyStore methods
+// for cases where we don't need additional matchers or time range filtering.
+func (s *ProxyStore) storesForTSDBSelector() []Client {
+	var filteredStores []Client
+	for _, st := range s.stores() {
+		matches, _ := s.tsdbSelector.MatchLabelSets(st.LabelSets()...)
+		if matches {
+			filteredStores = append(filteredStores, st)
+		}
+	}
+	return filteredStores
+}
+
 // TODO: consider moving the following functions into something like "pkg/pruneutils" since it is also used for exemplars.
 
 func (s *ProxyStore) matchingStores(ctx context.Context, minTime, maxTime int64, matchers []*labels.Matcher) ([]Client, []labels.Labels, []string) {

@@ -580,7 +608,7 @@ func (s *ProxyStore) matchingStores(ctx context.Context, minTime, maxTime int64,
 		storeLabelSets []labels.Labels
 		storeDebugMsgs []string
 	)
-	for _, st := range s.stores() {
+	for _, st := range s.storesForTSDBSelector() {
 		// We might be able to skip the store if its meta information indicates it cannot have series matching our query.
 		if ok, reason := storeMatches(ctx, s.debugLogging, st, minTime, maxTime, matchers...); !ok {
 			if s.debugLogging {

@@ -588,13 +616,8 @@ func (s *ProxyStore) matchingStores(ctx context.Context, minTime, maxTime int64,
 			}
 			continue
 		}
-		matches, extraMatchers := s.tsdbSelector.MatchLabelSets(st.LabelSets()...)
-		if !matches {
-			if s.debugLogging {
-				storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to: %v", st, "tsdb selector"))
-			}
-			continue
-		}
+		// Since we already filtered by TSDBSelector in filteredStores(), we just need to get the extra matchers
+		_, extraMatchers := s.tsdbSelector.MatchLabelSets(st.LabelSets()...)
 		storeLabelSets = append(storeLabelSets, extraMatchers...)
 
 		stores = append(stores, st)

@@ -669,7 +692,7 @@ func storeMatchDebugMetadata(s Client, debugLogging bool, storeDebugMatchers [][
 	return true, ""
 }
 
-// LabelSetsMatch returns false if all label-set do not match the matchers (aka: OR is between all label-sets).
+// LabelSetsMatch returns false if all label-sets do not match the matchers (aka: OR is between all label-sets).
 func LabelSetsMatch(matchers []*labels.Matcher, lset ...labels.Labels) bool {
 	if len(lset) == 0 {
 		return true
@@ -814,9 +814,10 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 
 		req *storepb.SeriesRequest
 
		expectedSeries      []rawSeries
		expectedErr         error
		expectedWarningsLen int
+		expectTimeoutBehavior bool
 	}{
 		{
 			title: "partial response disabled; 1st errors out after some delay; 2nd store is fast",

@@ -1210,7 +1211,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 			expectedErr: errors.New("rpc error: code = Aborted desc = warning"),
 		},
 		{
-			title: "partial response disabled; all stores respond 3s",
+			title: "partial response disabled; all stores respond with timeout",
 			storeAPIs: []Client{
 				&storetestutil.TestClient{
 					StoreClient: &mockedStoreAPI{

@@ -1219,7 +1220,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 							storeSeriesResponse(t, labels.FromStrings("a", "b"), []sample{{4, 1}, {5, 2}, {6, 3}}),
 							storeSeriesResponse(t, labels.FromStrings("a", "b"), []sample{{7, 1}, {8, 2}, {9, 3}}),
 						},
-						RespDuration: 3 * time.Second,
+						RespDuration: 2 * time.Second,
 					},
 					ExtLset: []labels.Labels{labels.FromStrings("ext", "1")},
 					MinTime: 1,

@@ -1239,10 +1240,11 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 					chunks: [][]sample{{{1, 1}, {2, 2}, {3, 3}}},
 				},
 			},
-			expectedErr: errors.New("rpc error: code = Aborted desc = receive series from : context deadline exceeded"),
+			expectedErr:           errors.New("rpc error: code = Aborted desc = failed to receive any data in 1.5s from : context canceled"),
+			expectTimeoutBehavior: true,
 		},
 		{
-			title: "partial response enabled; all stores respond 3s",
+			title: "partial response enabled; all stores respond with manageable timing",
 			storeAPIs: []Client{
 				&storetestutil.TestClient{
 					StoreClient: &mockedStoreAPI{

@@ -1251,7 +1253,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 							storeSeriesResponse(t, labels.FromStrings("a", "b"), []sample{{4, 1}, {5, 2}, {6, 3}}),
 							storeSeriesResponse(t, labels.FromStrings("a", "b"), []sample{{7, 1}, {8, 2}, {9, 3}}),
 						},
-						RespDuration: 3 * time.Second,
+						RespDuration: 1 * time.Second,
 					},
 					ExtLset: []labels.Labels{labels.FromStrings("ext", "1")},
 					MinTime: 1,

@@ -1264,7 +1266,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 							storeSeriesResponse(t, labels.FromStrings("b", "c"), []sample{{4, 1}, {5, 2}, {6, 3}}),
 							storeSeriesResponse(t, labels.FromStrings("b", "c"), []sample{{7, 1}, {8, 2}, {9, 3}}),
 						},
-						RespDuration: 3 * time.Second,
+						RespDuration: 1 * time.Second,
 					},
 					ExtLset: []labels.Labels{labels.FromStrings("ext", "1")},
 					MinTime: 1,

@@ -1281,12 +1283,16 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 				{
 					lset: labels.FromStrings("a", "b"),
 					chunks: [][]sample{
 						{{1, 1}, {2, 2}, {3, 3}},
+						{{4, 1}, {5, 2}, {6, 3}},
+						{{7, 1}, {8, 2}, {9, 3}},
 					},
 				},
 				{
 					lset: labels.FromStrings("b", "c"),
 					chunks: [][]sample{
 						{{1, 1}, {2, 2}, {3, 3}},
+						{{4, 1}, {5, 2}, {6, 3}},
+						{{7, 1}, {8, 2}, {9, 3}},
 					},
 				},
 			},

@@ -1300,22 +1306,31 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 		if ok := t.Run(tc.title, func(t *testing.T) {
 			for _, strategy := range []RetrievalStrategy{EagerRetrieval, LazyRetrieval} {
 				if ok := t.Run(string(strategy), func(t *testing.T) {
+					// Use more reasonable timeouts
+					proxyTimeout := 3 * time.Second
+					contextTimeout := 4 * time.Second
+
+					if tc.expectTimeoutBehavior {
+						// For timeout tests, use shorter timeouts
+						proxyTimeout = 1500 * time.Millisecond
+						contextTimeout = 2 * time.Second
+					}
+
 					q := NewProxyStore(nil,
 						nil,
 						func() []Client { return tc.storeAPIs },
 						component.Query,
 						tc.selectorLabels,
-						4*time.Second, strategy,
+						proxyTimeout, strategy,
 						options...,
 					)
 
-					ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+					ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 					defer cancel()
 					s := newStoreSeriesServer(ctx)
 
-					t0 := time.Now()
 					err := q.Series(tc.req, s)
-					elapsedTime := time.Since(t0)
 					if tc.expectedErr != nil {
 						testutil.NotOk(t, err)
 						testutil.Equals(t, tc.expectedErr.Error(), err.Error())

@@ -1327,7 +1342,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 					seriesEquals(t, tc.expectedSeries, s.SeriesSet)
 					testutil.Equals(t, tc.expectedWarningsLen, len(s.Warnings), "got %v", s.Warnings)
 
-					testutil.Assert(t, elapsedTime < 5010*time.Millisecond, fmt.Sprintf("Request has taken %f, expected: <%d, it seems that responseTimeout doesn't work properly.", elapsedTime.Seconds(), 5))
+					// Note: We avoid timing assertions as they are flaky in CI environments
 
 				}); !ok {
 					return

@@ -1340,7 +1355,7 @@ func TestProxyStore_SeriesSlowStores(t *testing.T) {
 
 	// Wait until the last goroutine exits which is stuck on time.Sleep().
 	// Otherwise, goleak complains.
-	time.Sleep(5 * time.Second)
+	time.Sleep(2 * time.Second)
 }
 
 func TestProxyStore_Series_RequestParamsProxied(t *testing.T) {
@@ -78,6 +78,7 @@ func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram {
 		PositiveBuckets: hp.GetPositiveDeltas(),
 		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
 		NegativeBuckets: hp.GetNegativeDeltas(),
+		CustomValues:    hp.GetCustomValues(),
 	}
 }
 

@@ -98,6 +99,7 @@ func FloatHistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram
 		PositiveBuckets: hp.GetPositiveCounts(),
 		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
 		NegativeBuckets: hp.GetNegativeCounts(),
+		CustomValues:    hp.GetCustomValues(),
 	}
 }
 

@@ -120,6 +122,7 @@ func HistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram {
 		PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()),
 		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
 		NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()),
+		CustomValues:    hp.GetCustomValues(),
 	}
 }
 

@@ -156,6 +159,7 @@ func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogra
 		PositiveDeltas: h.PositiveBuckets,
 		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
 		Timestamp:      timestamp,
+		CustomValues:   h.CustomValues,
 	}
 }
 

@@ -173,6 +177,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra
 		PositiveCounts: fh.PositiveBuckets,
 		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
 		Timestamp:      timestamp,
+		CustomValues:   fh.CustomValues,
 	}
 }
 
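`CustomValues` carries the explicit bucket boundaries of custom-bucket native histograms (NHCB), which these proto conversions previously dropped. A sketch of the kind of histogram whose round trip this fixes, using only the upstream `model/histogram` package; the exact field values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A custom-bucket ("NHCB") histogram stores explicit boundaries in
	// CustomValues and uses the custom-buckets schema sentinel.
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           7,
		Sum:             9.5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []int64{2, 1, -1}, // delta-encoded counts: 2, 3, 2
		CustomValues:    []float64{0.1, 0.5, 1},
	}
	fmt.Println(h.UsesCustomBuckets()) // true

	// With the patch, HistogramToHistogramProto / HistogramProtoToHistogram
	// carry CustomValues through, so this field survives a round trip.
}
```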
@@ -387,6 +387,9 @@ type Histogram struct {
 	// timestamp is in ms format, see model/timestamp/timestamp.go for
 	// conversion from time.Time to Prometheus timestamp.
 	Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// custom_values are not part of the specification, DO NOT use in remote write clients.
+	// Used only for converting from OpenTelemetry to Prometheus internally.
+	CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
 }
 
 func (m *Histogram) Reset() { *m = Histogram{} }

@@ -569,6 +572,13 @@ func (m *Histogram) GetTimestamp() int64 {
 	return 0
 }
 
+func (m *Histogram) GetCustomValues() []float64 {
+	if m != nil {
+		return m.CustomValues
+	}
+	return nil
+}
+
 // XXX_OneofWrappers is for the internal use of the proto package.
 func (*Histogram) XXX_OneofWrappers() []interface{} {
 	return []interface{}{
@@ -991,79 +1001,81 @@ func init() {
 func init() { proto.RegisterFile("store/storepb/prompb/types.proto", fileDescriptor_166e07899dab7c14) }
 
 var fileDescriptor_166e07899dab7c14 = []byte{
-	// 1150 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x8f, 0xda, 0x46,
+	// 1171 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x8f, 0x1b, 0x45,
 	[... the remaining rows of the regenerated gzipped FileDescriptorProto differ throughout (generated bytes); the dump is truncated mid-block in the source ...]
0xff, 0xea, 0x3c, 0xde, 0x07, 0x63, 0x1d, 0xb5, 0x7d, 0xdb, 0x8b, 0xa0, 0x3d, 0xeb, 0x0c, 0x4d,
|
0x5f, 0xe9, 0x47, 0xf2, 0x01, 0xd4, 0xed, 0xa5, 0x90, 0x81, 0x37, 0xc1, 0xee, 0x15, 0xa6, 0x81,
|
||||||
0x05, 0xe9, 0xa0, 0xf6, 0x07, 0xa6, 0x9a, 0xdd, 0x78, 0xad, 0x5d, 0x84, 0x82, 0x24, 0xde, 0x2e,
|
0x4c, 0x6a, 0x31, 0xf8, 0x2d, 0x62, 0xd6, 0x4d, 0xa8, 0xae, 0x43, 0x6f, 0x8f, 0x84, 0x32, 0xe8,
|
||||||
0x03, 0x64, 0x53, 0x60, 0x3f, 0x00, 0xc8, 0x0e, 0x49, 0x0c, 0x62, 0x70, 0x7a, 0xca, 0x68, 0x3c,
|
0x0f, 0x7a, 0x23, 0x43, 0x23, 0x25, 0xc8, 0x0f, 0x86, 0x46, 0x3e, 0x1b, 0x0b, 0xfa, 0x51, 0x19,
|
||||||
0xd9, 0xd7, 0x70, 0xa2, 0x09, 0x7c, 0x41, 0xfd, 0x39, 0x77, 0xe4, 0x40, 0x57, 0x70, 0xa2, 0xd9,
|
0x8a, 0x78, 0xbb, 0xa3, 0x1a, 0x40, 0xd6, 0x2a, 0xd6, 0x2d, 0x80, 0xec, 0x4b, 0xaa, 0x6e, 0x0d,
|
||||||
0x7f, 0xa8, 0x00, 0x23, 0xd7, 0xa3, 0x43, 0x1a, 0xb9, 0x94, 0xbd, 0x9b, 0x2b, 0xfc, 0x35, 0x14,
|
0xce, 0xcf, 0x05, 0x8f, 0xdb, 0xff, 0x02, 0x4d, 0x24, 0x85, 0x2f, 0xb8, 0x3f, 0x97, 0x0e, 0x76,
|
||||||
0x99, 0x7c, 0x72, 0x98, 0xa5, 0xca, 0x2a, 0x37, 0x77, 0x8e, 0x37, 0x7e, 0x92, 0x92, 0x96, 0xa7,
|
0x7d, 0x9d, 0x26, 0x92, 0xf5, 0x5b, 0x1e, 0x60, 0xec, 0x7a, 0x7c, 0xc4, 0x23, 0x97, 0x8b, 0x37,
|
||||||
0xde, 0xe8, 0x1b, 0x30, 0x68, 0xf2, 0xd8, 0x30, 0x4b, 0x93, 0xa1, 0xb7, 0x76, 0x42, 0xd3, 0xe7,
|
0xf3, 0xce, 0xbf, 0x80, 0xb2, 0xc0, 0xb9, 0x24, 0xcc, 0x3c, 0x66, 0xb9, 0xbc, 0x53, 0x83, 0x78,
|
||||||
0x28, 0x09, 0xce, 0x22, 0xd0, 0x77, 0x00, 0x4e, 0xda, 0x36, 0x66, 0xe5, 0x65, 0xfc, 0xc1, 0xdb,
|
0x6e, 0x25, 0x7d, 0x91, 0x5a, 0x93, 0x2f, 0xa1, 0xca, 0x93, 0x89, 0x24, 0x4c, 0x1d, 0x5d, 0xaf,
|
||||||
0x3b, 0x9b, 0x24, 0xd8, 0x88, 0xb1, 0x7f, 0x51, 0xa0, 0x2c, 0xbf, 0xa5, 0x47, 0xf8, 0xd4, 0xa1,
|
0xec, 0xb8, 0xa6, 0x33, 0x2b, 0x71, 0xce, 0x3c, 0xc8, 0xd7, 0x00, 0x4e, 0x5a, 0x5b, 0x61, 0x16,
|
||||||
0x11, 0xfa, 0x6a, 0x6b, 0x9d, 0xd8, 0x3b, 0xc9, 0x36, 0x9d, 0x9b, 0x1b, 0x6b, 0x04, 0x41, 0x7e,
|
0xd0, 0xff, 0xe0, 0xf5, 0xe5, 0x4f, 0x02, 0x6c, 0xf8, 0x58, 0x3f, 0x69, 0x50, 0xc3, 0xbb, 0xf4,
|
||||||
0x63, 0x71, 0x48, 0x39, 0x7b, 0xd9, 0x34, 0x09, 0xc6, 0x8a, 0xdd, 0x80, 0xbc, 0x5c, 0x0a, 0x3a,
|
0x99, 0xb4, 0x1d, 0x1e, 0x91, 0xcf, 0xb7, 0x76, 0x8e, 0xb5, 0x13, 0x6c, 0xd3, 0xb8, 0xb3, 0xb1,
|
||||||
0xa8, 0x9d, 0xa7, 0xf1, 0x84, 0xf4, 0x3b, 0x4f, 0xe3, 0x09, 0xc1, 0x62, 0x11, 0x08, 0x00, 0x77,
|
0x6b, 0x08, 0x14, 0x36, 0xb6, 0x0b, 0x9e, 0xb3, 0xf1, 0xa7, 0x23, 0x18, 0x0b, 0x56, 0x1b, 0x0a,
|
||||||
0x4c, 0xcd, 0xfe, 0x53, 0x11, 0x63, 0x45, 0x66, 0x62, 0xaa, 0x18, 0xba, 0x09, 0x45, 0xc6, 0x69,
|
0xb8, 0x39, 0x4a, 0x90, 0xef, 0xdd, 0x8f, 0x3b, 0x64, 0xd0, 0xbb, 0x1f, 0x77, 0x08, 0x55, 0xdb,
|
||||||
0x38, 0xf6, 0x98, 0x24, 0xa7, 0x61, 0x5d, 0xa8, 0x3d, 0x26, 0x4a, 0x9f, 0x2e, 0xfd, 0x69, 0x5a,
|
0x42, 0x01, 0xb4, 0x67, 0xe8, 0xd6, 0xef, 0x9a, 0x6a, 0x2b, 0x36, 0x53, 0x5d, 0x25, 0xc8, 0x65,
|
||||||
0x5a, 0xc8, 0xe8, 0x16, 0x94, 0x18, 0x27, 0x11, 0x17, 0xde, 0xf1, 0xeb, 0x59, 0x94, 0x7a, 0x8f,
|
0x28, 0x0b, 0xc9, 0xc3, 0x89, 0x27, 0x90, 0x9c, 0x4e, 0x4b, 0x4a, 0xec, 0x0b, 0x95, 0xfa, 0x7c,
|
||||||
0xa1, 0xf7, 0x41, 0xa7, 0xfe, 0x6c, 0x2c, 0x0f, 0x4c, 0x18, 0x0a, 0xd4, 0x9f, 0xf5, 0x18, 0x3a,
|
0xe9, 0xdb, 0x69, 0x6a, 0x75, 0x26, 0x57, 0xa0, 0x22, 0x24, 0x8b, 0xa4, 0xb2, 0x8e, 0x47, 0x6c,
|
||||||
0x80, 0xd2, 0x3c, 0x0a, 0x96, 0xa1, 0xeb, 0xcf, 0xad, 0x42, 0x5d, 0x6b, 0x18, 0x78, 0xad, 0xa3,
|
0x19, 0xe5, 0xbe, 0x20, 0x6f, 0x43, 0x89, 0xfb, 0xb3, 0x09, 0x7e, 0x30, 0xa5, 0x28, 0x72, 0x7f,
|
||||||
0x2a, 0xa8, 0x93, 0x95, 0x7c, 0xc1, 0x4a, 0x58, 0x9d, 0xac, 0x44, 0xf6, 0x88, 0xf8, 0x73, 0x2a,
|
0xd6, 0x17, 0xe4, 0x00, 0x2a, 0xf3, 0x28, 0x58, 0x86, 0xae, 0x3f, 0x37, 0x8b, 0x2d, 0xbd, 0x5d,
|
||||||
0x92, 0x14, 0xe3, 0xec, 0x52, 0xef, 0x31, 0xfb, 0x2f, 0x05, 0x0a, 0x47, 0xce, 0xd2, 0x3f, 0x43,
|
0xa5, 0x6b, 0x99, 0x34, 0x20, 0x3f, 0x5d, 0xe1, 0x98, 0xab, 0xd0, 0xfc, 0x74, 0xa5, 0xa2, 0x47,
|
||||||
0x35, 0xd8, 0xf3, 0x5c, 0x7f, 0x2c, 0x2e, 0x49, 0xc6, 0xd9, 0xf0, 0x5c, 0x5f, 0x4c, 0x67, 0x8f,
|
0xcc, 0x9f, 0x73, 0x15, 0xa4, 0x1c, 0x47, 0x47, 0xb9, 0x2f, 0xac, 0x3f, 0x34, 0x28, 0x1e, 0x3b,
|
||||||
0x49, 0x3b, 0x39, 0x5f, 0xdb, 0x93, 0x4d, 0xe4, 0x91, 0xf3, 0xc4, 0x7e, 0x2f, 0xe9, 0x84, 0x26,
|
0x4b, 0xff, 0x11, 0x69, 0xc2, 0x9e, 0xe7, 0xfa, 0x13, 0xf5, 0x92, 0x32, 0xce, 0x55, 0xcf, 0xf5,
|
||||||
0x3b, 0x71, 0x7b, 0xa7, 0x13, 0xb2, 0x4a, 0xb3, 0xe3, 0x4f, 0x83, 0x99, 0xeb, 0xcf, 0xb3, 0x36,
|
0x55, 0x77, 0xf6, 0x05, 0xea, 0xd9, 0x93, 0xb5, 0x3e, 0x59, 0x57, 0x1e, 0x7b, 0x92, 0xe8, 0x6f,
|
||||||
0x88, 0x35, 0x2f, 0x3f, 0xad, 0x8c, 0xa5, 0x6c, 0x3f, 0x84, 0x52, 0xea, 0xb5, 0x73, 0x37, 0x7f,
|
0x24, 0x95, 0xd0, 0xb1, 0x12, 0x57, 0x77, 0x2a, 0x81, 0x59, 0x3a, 0x3d, 0xdf, 0x0e, 0x66, 0xae,
|
||||||
0x1c, 0x88, 0x2d, 0xbc, 0xb5, 0x7a, 0x55, 0xf4, 0x1e, 0xec, 0x1f, 0x3f, 0x19, 0x1c, 0x8e, 0xc6,
|
0x3f, 0xcf, 0xca, 0xa0, 0xfe, 0x0b, 0xe0, 0xd5, 0x6a, 0x14, 0xcf, 0xd6, 0x6d, 0xa8, 0xa4, 0x56,
|
||||||
0x1b, 0xfb, 0xd8, 0xfe, 0x5d, 0x81, 0x8a, 0x2c, 0x49, 0x67, 0xef, 0xf2, 0x6a, 0xdd, 0x07, 0x7d,
|
0x3b, 0x6f, 0xf3, 0xfb, 0xa1, 0x5a, 0xd5, 0x5b, 0xfb, 0x39, 0x4f, 0xde, 0x82, 0xfd, 0x93, 0x7b,
|
||||||
0x2a, 0xaa, 0xa6, 0x37, 0xeb, 0xc6, 0x7f, 0x9f, 0x43, 0x32, 0xda, 0x89, 0x6f, 0xbb, 0xfe, 0xf2,
|
0xc3, 0xc3, 0xf1, 0x64, 0x63, 0x69, 0x5b, 0xbf, 0x6a, 0x50, 0xc7, 0x94, 0x7c, 0xf6, 0x26, 0x9f,
|
||||||
0xa2, 0xa6, 0xbc, 0xba, 0xa8, 0x29, 0xff, 0x5c, 0xd4, 0x94, 0x9f, 0x2f, 0x6b, 0xb9, 0x57, 0x97,
|
0xd6, 0x4d, 0x28, 0xd9, 0x2a, 0x6b, 0xfa, 0xb2, 0x2e, 0xfd, 0xf7, 0x77, 0x48, 0x5a, 0x3b, 0xb1,
|
||||||
0xb5, 0xdc, 0xdf, 0x97, 0xb5, 0xdc, 0x73, 0x3d, 0xfe, 0x5f, 0x36, 0xd1, 0xe5, 0x1f, 0xaa, 0x7b,
|
0x3d, 0x6a, 0x3d, 0x7d, 0xd1, 0xd4, 0x9e, 0xbd, 0x68, 0x6a, 0x7f, 0xbf, 0x68, 0x6a, 0x3f, 0xbe,
|
||||||
0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x67, 0xa4, 0x22, 0x7f, 0xb6, 0x09, 0x00, 0x00,
|
0x6c, 0xe6, 0x9e, 0xbd, 0x6c, 0xe6, 0xfe, 0x7c, 0xd9, 0xcc, 0x3d, 0x2c, 0xc5, 0x7f, 0xde, 0xa6,
|
||||||
|
0x25, 0xfc, 0xd7, 0x75, 0xe3, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xc8, 0x95, 0x86, 0xdb,
|
||||||
|
0x09, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
|
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
|
||||||
|
@@ -1217,6 +1229,18 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.CustomValues) > 0 {
+		for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- {
+			f1 := math.Float64bits(float64(m.CustomValues[iNdEx]))
+			i -= 8
+			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1))
+		}
+		i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x82
+	}
 	if m.Timestamp != 0 {
 		i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
 		i--
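The two tag bytes written above (0x82, then 0x1, emitted in reverse because MarshalToSizedBuffer fills the buffer back to front) are simply the varint-encoded protobuf key for field 16 with wire type 2. A minimal sketch of that arithmetic, independent of the generated code:

```go
package main

import "fmt"

func main() {
	const fieldNum, wireType = 16, 2      // custom_values: length-delimited
	key := uint64(fieldNum<<3 | wireType) // 130; fields >= 16 need two key bytes

	b0 := byte(key&0x7f | 0x80) // 0x82: low 7 bits plus continuation bit
	b1 := byte(key >> 7)        // 0x01: remaining bits
	fmt.Printf("tag bytes: %#x %#x\n", b0, b1)
}
```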
@@ -1229,30 +1253,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	if len(m.PositiveCounts) > 0 {
 		for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
+			f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
 			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1))
+			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2))
 		}
 		i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8))
 		i--
 		dAtA[i] = 0x6a
 	}
 	if len(m.PositiveDeltas) > 0 {
-		var j2 int
-		dAtA4 := make([]byte, len(m.PositiveDeltas)*10)
+		var j3 int
+		dAtA5 := make([]byte, len(m.PositiveDeltas)*10)
 		for _, num := range m.PositiveDeltas {
-			x3 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x3 >= 1<<7 {
-				dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80)
-				j2++
-				x3 >>= 7
+			x4 := (uint64(num) << 1) ^ uint64((num >> 63))
+			for x4 >= 1<<7 {
+				dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80)
+				j3++
+				x4 >>= 7
 			}
-			dAtA4[j2] = uint8(x3)
-			j2++
+			dAtA5[j3] = uint8(x4)
+			j3++
 		}
-		i -= j2
-		copy(dAtA[i:], dAtA4[:j2])
-		i = encodeVarintTypes(dAtA, i, uint64(j2))
+		i -= j3
+		copy(dAtA[i:], dAtA5[:j3])
+		i = encodeVarintTypes(dAtA, i, uint64(j3))
 		i--
 		dAtA[i] = 0x62
 	}
@@ -1272,30 +1296,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	if len(m.NegativeCounts) > 0 {
 		for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
+			f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
 			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
+			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
 		}
 		i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8))
 		i--
 		dAtA[i] = 0x52
 	}
 	if len(m.NegativeDeltas) > 0 {
-		var j6 int
-		dAtA8 := make([]byte, len(m.NegativeDeltas)*10)
+		var j7 int
+		dAtA9 := make([]byte, len(m.NegativeDeltas)*10)
 		for _, num := range m.NegativeDeltas {
-			x7 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x7 >= 1<<7 {
-				dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
-				j6++
-				x7 >>= 7
+			x8 := (uint64(num) << 1) ^ uint64((num >> 63))
+			for x8 >= 1<<7 {
+				dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80)
+				j7++
+				x8 >>= 7
 			}
-			dAtA8[j6] = uint8(x7)
-			j6++
+			dAtA9[j7] = uint8(x8)
+			j7++
 		}
-		i -= j6
-		copy(dAtA[i:], dAtA8[:j6])
-		i = encodeVarintTypes(dAtA, i, uint64(j6))
+		i -= j7
+		copy(dAtA[i:], dAtA9[:j7])
+		i = encodeVarintTypes(dAtA, i, uint64(j7))
 		i--
 		dAtA[i] = 0x4a
 	}
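The only change in these two hunks is the renumbering of gogo's generated temporaries (f1 to f2, j2 to j3, and so on) to make room for the new CustomValues block. The delta loops themselves are plain ZigZag varint encoding, which maps signed deltas to unsigned integers so small negative values stay small on the wire; a standalone sketch:

```go
package main

import "fmt"

// zigzag is the same (n << 1) ^ (n >> 63) transform the generated
// marshaler inlines for PositiveDeltas and NegativeDeltas.
func zigzag(n int64) uint64 {
	return (uint64(n) << 1) ^ uint64(n>>63)
}

func main() {
	for _, d := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%3d -> %d\n", d, zigzag(d)) // 0, 1, 2, 3, 4
	}
}
```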
@@ -1850,6 +1874,9 @@ func (m *Histogram) Size() (n int) {
 	if m.Timestamp != 0 {
 		n += 1 + sovTypes(uint64(m.Timestamp))
 	}
+	if len(m.CustomValues) > 0 {
+		n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8
+	}
 	return n
 }
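The size accounting mirrors the marshaler: 2 bytes for the field-16 key, a varint length prefix, and 8 bytes per double. sovTypes is gogo's varint-width helper; the equivalent arithmetic, for reference:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov returns how many bytes the varint encoding of x occupies,
// matching the generated sovTypes helper.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	n := 3 // e.g. three custom bucket boundaries
	size := 2 + sov(uint64(n*8)) + n*8 // key + length prefix + payload
	fmt.Println(size)                  // 27
}
```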
@@ -2903,6 +2930,60 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
 					break
 				}
 			}
+		case 16:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				v2 := float64(math.Float64frombits(v))
+				m.CustomValues = append(m.CustomValues, v2)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowTypes
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthTypes
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthTypes
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.CustomValues) == 0 {
+					m.CustomValues = make([]float64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					v2 := float64(math.Float64frombits(v))
+					m.CustomValues = append(m.CustomValues, v2)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType)
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipTypes(dAtA[iNdEx:])
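Wire type 2 here is the packed form: a varint byte count followed by raw little-endian float64 values, which is what the inner loop above consumes. A self-contained sketch of just that payload decoding (the generated code additionally handles the unpacked wire-type-1 form and all bounds errors):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// decodePackedDoubles reads consecutive 8-byte little-endian float64s,
// the same inner step as the packed branch of case 16 above.
func decodePackedDoubles(payload []byte) []float64 {
	out := make([]float64, 0, len(payload)/8)
	for i := 0; i+8 <= len(payload); i += 8 {
		out = append(out, math.Float64frombits(binary.LittleEndian.Uint64(payload[i:])))
	}
	return out
}

func main() {
	var buf [16]byte
	binary.LittleEndian.PutUint64(buf[0:], math.Float64bits(0.5))
	binary.LittleEndian.PutUint64(buf[8:], math.Float64bits(2.0))
	fmt.Println(decodePackedDoubles(buf[:])) // [0.5 2]
}
```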
@@ -115,6 +115,10 @@ message Histogram {
   // timestamp is in ms format, see model/timestamp/timestamp.go for
   // conversion from time.Time to Prometheus timestamp.
   int64 timestamp = 15;
+
+  // custom_values are not part of the specification, DO NOT use in remote write clients.
+  // Used only for converting from OpenTelemetry to Prometheus internally.
+  repeated double custom_values = 16;
 }

 // A BucketSpan defines a number of consecutive buckets with their
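For context on what the new field carries: in Prometheus's data model, native histograms with custom bucket boundaries (the OpenTelemetry conversion case the comment mentions) use a dedicated schema sentinel and keep the explicit upper bounds in a CustomValues slice. A hedged sketch, assuming the current model/histogram package; the constant and field names below come from that package, not from this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A histogram whose buckets are explicit bounds rather than the
	// exponential schema; CustomValues is what custom_values mirrors.
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		CustomValues:    []float64{0.1, 0.5, 1, 5}, // explicit upper bounds
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
		PositiveBuckets: []int64{1, 0, 0, 0}, // delta-encoded counts
		Count:           4,
	}
	fmt.Println(h.Schema == histogram.CustomBucketsSchema) // true
}
```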
@@ -29,8 +29,10 @@ import (
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/index"
 	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/compression"
 	"go.uber.org/atomic"

+	"github.com/thanos-io/thanos/pkg/compressutil"
 	"github.com/thanos-io/thanos/pkg/store/hintspb"
 	"github.com/thanos-io/thanos/pkg/store/labelpb"
 	"github.com/thanos-io/thanos/pkg/store/storepb"
@@ -102,7 +104,7 @@ func CreateHeadWithSeries(t testing.TB, j int, opts HeadGenOptions) (*tsdb.Head,
 	var w *wlog.WL
 	var err error
 	if opts.WithWAL {
-		w, err = wlog.New(nil, nil, filepath.Join(opts.TSDBDir, "wal"), wlog.ParseCompressionType(true, string(wlog.CompressionSnappy)))
+		w, err = wlog.New(nil, nil, filepath.Join(opts.TSDBDir, "wal"), compressutil.ParseCompressionType(true, compression.Snappy))
 		testutil.Ok(t, err)
 	} else {
 		testutil.Ok(t, os.MkdirAll(filepath.Join(opts.TSDBDir, "wal"), os.ModePerm))
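The call site moves from the removed wlog.ParseCompressionType helper to a Thanos-local compressutil package built on Prometheus's util/compression types. The helper's body is not shown in this diff; a plausible reconstruction inferred purely from the call site, with the behavior and everything but the import paths being assumptions:

```go
// Package compressutil sketch: hypothetical, not the actual Thanos code.
package compressutil

import "github.com/prometheus/prometheus/util/compression"

// ParseCompressionType presumably mirrors the old wlog helper: return the
// requested compression type when compression is enabled, none otherwise.
func ParseCompressionType(compress bool, t compression.Type) compression.Type {
	if compress {
		return t
	}
	return compression.None
}
```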
@@ -10,6 +10,7 @@ import (

 	"github.com/efficientgo/core/testutil"
 	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/thanos-io/thanos/pkg/component"
 	"github.com/thanos-io/thanos/pkg/store"
 	"github.com/thanos-io/thanos/pkg/store/storepb"
@@ -129,7 +130,7 @@ func TestTenantProxyPassing(t *testing.T) {
 		nil,
 		func() []store.Client { return cls },
 		component.Query,
-		nil, 0*time.Second, store.EagerRetrieval,
+		labels.EmptyLabels(), 0*time.Second, store.EagerRetrieval,
 	)
 	// We assert directly in the mocked store apis LabelValues/LabelNames/Series funcs
 	_, _ = q.LabelValues(ctx, &storepb.LabelValuesRequest{})
@@ -174,7 +175,7 @@ func TestTenantProxyPassing(t *testing.T) {
 		nil,
 		func() []store.Client { return cls },
 		component.Query,
-		nil, 0*time.Second, store.EagerRetrieval,
+		labels.EmptyLabels(), 0*time.Second, store.EagerRetrieval,
 	)

 	// We assert directly in the mocked store apis LabelValues/LabelNames/Series funcs
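Passing labels.EmptyLabels() instead of nil matters because labels.Labels is no longer guaranteed to be a plain slice across Prometheus's label implementations (the slicelabels/stringlabels/dedupelabels build variants), so nil is not a valid empty value everywhere. A tiny illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// The portable way to say "no labels", regardless of build variant.
	empty := labels.EmptyLabels()
	fmt.Println(empty.IsEmpty()) // true
	fmt.Println(empty.Len())     // 0
}
```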
@@ -16,6 +16,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
+	"github.com/thanos-io/objstore"
 	"github.com/thanos-io/objstore/client"
 	"github.com/thanos-io/objstore/providers/azure"
 	"github.com/thanos-io/objstore/providers/bos"
@@ -45,15 +46,15 @@ var (
 	configs        map[string]interface{}
 	possibleValues []string

-	bucketConfigs = map[client.ObjProvider]interface{}{
-		client.AZURE:      azure.Config{},
-		client.GCS:        gcs.Config{},
-		client.S3:         s3.DefaultConfig,
-		client.SWIFT:      swift.DefaultConfig,
-		client.COS:        cos.DefaultConfig,
-		client.ALIYUNOSS:  oss.Config{},
-		client.FILESYSTEM: filesystem.Config{},
-		client.BOS:        bos.Config{},
+	bucketConfigs = map[objstore.ObjProvider]interface{}{
+		objstore.AZURE:      azure.DefaultConfig,
+		objstore.GCS:        gcs.DefaultConfig,
+		objstore.S3:         s3.DefaultConfig,
+		objstore.SWIFT:      swift.DefaultConfig,
+		objstore.COS:        cos.DefaultConfig,
+		objstore.ALIYUNOSS:  oss.Config{},
+		objstore.FILESYSTEM: filesystem.Config{},
+		objstore.BOS:        bos.Config{},
 	}

 	tracingConfigs = map[trclient.TracingProvider]interface{}{
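This hunk, and the many Type: client.S3 to Type: objstore.S3 changes in the test files below, track the objstore library moving ObjProvider and its provider constants from the client subpackage to the package root (and exposing defaults such as azure.DefaultConfig). A minimal usage sketch; the bucket name and map payload are placeholders:

```go
package main

import (
	"fmt"

	"github.com/thanos-io/objstore"
	"github.com/thanos-io/objstore/client"
)

func main() {
	cfg := client.BucketConfig{
		Type: objstore.S3, // was client.S3 before the move
		Config: map[string]interface{}{ // placeholder provider payload
			"bucket": "example-bucket",
		},
	}
	fmt.Printf("%+v\n", cfg)
}
```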
@@ -455,7 +455,7 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) {
 	}

 	bktConfig := client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 	}

@@ -919,7 +919,7 @@ func TestCompactorDownsampleIgnoresMarked(t *testing.T) {
 	testutil.Ok(t, block.MarkForNoDownsample(context.Background(), logger, bkt, downsampledRawID, metadata.ManualNoDownsampleReason, "why not", promauto.With(nil).NewCounter(prometheus.CounterOpts{})))

 	c := e2ethanos.NewCompactorBuilder(e, "working").Init(client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.Dir()),
 	}, nil)
 	testutil.Ok(t, e2e.StartAndWaitReady(c))
@@ -967,7 +967,7 @@ func TestCompactorIssue6775(t *testing.T) {

 	// Downsample them first.
 	bds := e2ethanos.NewToolsBucketDownsample(e, "downsample", client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 	})
 	testutil.Ok(t, bds.Start())
@@ -997,7 +997,7 @@ func TestCompactorIssue6775(t *testing.T) {

 	// Run the compactor.
 	c := e2ethanos.NewCompactorBuilder(e, "working").Init(client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.Dir()),
 	}, nil, "--compact.enable-vertical-compaction")
 	testutil.Ok(t, e2e.StartAndWaitReady(c))
@@ -1042,7 +1042,7 @@ func TestCompactorDownsampleNativeHistograms(t *testing.T) {
 	testutil.Ok(t, objstore.UploadDir(context.Background(), logger, bkt, path.Join(dir, rawBlockID.String()), rawBlockID.String()))
 	// Downsample them first.
 	bds := e2ethanos.NewToolsBucketDownsample(e, "downsample", client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 	})
 	testutil.Ok(t, bds.Start())
@@ -149,7 +149,7 @@ func TestDistributedEngineWithOverlappingIntervalsEnabled(t *testing.T) {
 		e,
 		"s1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket1, minio1.InternalEndpoint("http"), minio1.InternalDir()),
 		},
 		"",
@@ -242,7 +242,7 @@ func TestDistributedEngineWithoutOverlappingIntervals(t *testing.T) {
 		e,
 		"s1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket1, minio1.InternalEndpoint("http"), minio1.InternalDir()),
 		},
 		"",
@@ -777,6 +777,7 @@ type RulerBuilder struct {
 	evalInterval         string
 	forGracePeriod       string
 	restoreIgnoredLabels []string
+	nativeHistograms     bool
 }

 // NewRulerBuilder is a Ruler future that allows extra configuration before initialization.
@@ -827,6 +828,11 @@ func (r *RulerBuilder) WithRestoreIgnoredLabels(labels ...string) *RulerBuilder
 	return r
 }

+func (r *RulerBuilder) WithNativeHistograms() *RulerBuilder {
+	r.nativeHistograms = true
+	return r
+}
+
 func (r *RulerBuilder) InitTSDB(internalRuleDir string, queryCfg []clientconfig.Config) *e2eobs.Observable {
 	return r.initRule(internalRuleDir, queryCfg, nil)
 }
@@ -894,6 +900,10 @@ func (r *RulerBuilder) initRule(internalRuleDir string, queryCfg []clientconfig.
 		ruleArgs["--remote-write.config"] = string(rwCfgBytes)
 	}

+	if r.nativeHistograms {
+		ruleArgs["--tsdb.enable-native-histograms"] = ""
+	}
+
 	args := e2e.BuildArgs(ruleArgs)

 	for _, label := range r.restoreIgnoredLabels {
@@ -16,6 +16,7 @@ import (
 	"github.com/efficientgo/e2e"
 	"github.com/prometheus/prometheus/model/labels"

+	"github.com/thanos-io/objstore"
 	"github.com/thanos-io/objstore/client"

 	"github.com/efficientgo/core/testutil"
@@ -44,7 +45,7 @@ func TestInfo(t *testing.T) {
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",
@@ -70,43 +71,16 @@ func TestInfo(t *testing.T) {
 	expected := map[string][]query.EndpointStatus{
 		"sidecar": {
 			{
 				Name: "e2e-test-info-sidecar-alone1:9091",
-				LabelSets: []labels.Labels{{
-					{
-						Name:  "prometheus",
-						Value: "prom-alone1",
-					},
-					{
-						Name:  "replica",
-						Value: "0",
-					},
-				}},
+				LabelSets: []labels.Labels{labels.FromStrings("prometheus", "prom-alone1", "replica", "0")},
 			},
 			{
 				Name: "e2e-test-info-sidecar-alone2:9091",
-				LabelSets: []labels.Labels{{
-					{
-						Name:  "prometheus",
-						Value: "prom-alone2",
-					},
-					{
-						Name:  "replica",
-						Value: "0",
-					},
-				}},
+				LabelSets: []labels.Labels{labels.FromStrings("prometheus", "prom-alone2", "replica", "0")},
 			},
 			{
 				Name: "e2e-test-info-sidecar-alone3:9091",
-				LabelSets: []labels.Labels{{
-					{
-						Name:  "prometheus",
-						Value: "prom-alone3",
-					},
-					{
-						Name:  "replica",
-						Value: "0",
-					},
-				}},
+				LabelSets: []labels.Labels{labels.FromStrings("prometheus", "prom-alone3", "replica", "0")},
 			},
 		},
 		"store": {
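labels.FromStrings takes alternating name/value pairs and returns a sorted Labels value, which is what lets each handwritten struct literal above collapse to a single line. For example:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Pairs may be given in any order; the result is sorted by name.
	ls := labels.FromStrings("replica", "0", "prometheus", "prom-alone1")
	fmt.Println(ls) // {prometheus="prom-alone1", replica="0"}
}
```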
@@ -394,6 +394,64 @@ func TestRuleNativeHistograms(t *testing.T) {
 	queryAndAssert(t, ctx, q.Endpoint("http"), func() string { return expectedRecordedName }, time.Now, promclient.QueryOptions{Deduplicate: true}, expectedHistogramModelVector(expectedRecordedName, nil, expectedRecordedHistogram, map[string]string{"tenant_id": "default-tenant"}))
 }

+func TestRuleNativeHistogramsTSDB(t *testing.T) {
+	t.Parallel()
+
+	e, err := e2e.NewDockerEnvironment("hist-rule-tsdb")
+	testutil.Ok(t, err)
+	t.Cleanup(e2ethanos.CleanScenario(t, e))
+
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
+	t.Cleanup(cancel)
+
+	rFuture := e2ethanos.NewRulerBuilder(e, "1").WithNativeHistograms()
+	rulesSubDir := "rules"
+	rulesPath := filepath.Join(rFuture.Dir(), rulesSubDir)
+	testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm))
+
+	for i, rule := range []string{testRuleRecordHistogramSum} {
+		createRuleFile(t, filepath.Join(rulesPath, fmt.Sprintf("rules-%d.yaml", i)), rule)
+	}
+
+	receiver := e2ethanos.NewReceiveBuilder(e, "1").WithIngestionEnabled().WithNativeHistograms().Init()
+	testutil.Ok(t, e2e.StartAndWaitReady(receiver))
+
+	receiver2 := e2ethanos.NewReceiveBuilder(e, "2").WithIngestionEnabled().WithNativeHistograms().Init()
+	testutil.Ok(t, e2e.StartAndWaitReady(receiver2))
+
+	q := e2ethanos.NewQuerierBuilder(e, "1", receiver.InternalEndpoint("grpc"), receiver2.InternalEndpoint("grpc")).WithReplicaLabels("receive", "replica").Init()
+	testutil.Ok(t, e2e.StartAndWaitReady(q))
+
+	histograms := tsdbutil.GenerateTestHistograms(4)
+	ts := time.Now().Add(-2 * time.Minute)
+	rawRemoteWriteURL1 := "http://" + receiver.Endpoint("remote-write") + "/api/v1/receive"
+	_, err = writeHistograms(ctx, ts, testHistogramMetricName, histograms, nil, rawRemoteWriteURL1, prompb.Label{Name: "series", Value: "one"})
+	testutil.Ok(t, err)
+	rawRemoteWriteURL2 := "http://" + receiver2.Endpoint("remote-write") + "/api/v1/receive"
+	_, err = writeHistograms(ctx, ts, testHistogramMetricName, histograms, nil, rawRemoteWriteURL2, prompb.Label{Name: "series", Value: "two"})
+	testutil.Ok(t, err)
+
+	r := rFuture.InitTSDB(filepath.Join(rFuture.InternalDir(), rulesSubDir), []clientconfig.Config{
+		{
+			GRPCConfig: &clientconfig.GRPCConfig{
+				EndpointAddrs: []string{q.InternalEndpoint("grpc")},
+			},
+		},
+	})
+	testutil.Ok(t, e2e.StartAndWaitReady(r))
+
+	qR := e2ethanos.NewQuerierBuilder(e, "2", r.InternalEndpoint("grpc")).Init()
+	testutil.Ok(t, e2e.StartAndWaitReady(qR))
+
+	// Wait until samples are written successfully.
+	histogramMatcher, _ := matchers.NewMatcher(matchers.MatchEqual, "type", "histogram")
+	testutil.Ok(t, r.WaitSumMetricsWithOptions(e2emon.GreaterOrEqual(1), []string{"prometheus_tsdb_head_samples_appended_total"}, e2emon.WithLabelMatchers(histogramMatcher), e2emon.WaitMissingMetrics()))
+
+	expectedRecordedName := testHistogramMetricName + ":sum"
+	expectedRecordedHistogram := histograms[len(histograms)-1].ToFloat(nil).Mul(2)
+	queryAndAssert(t, ctx, qR.Endpoint("http"), func() string { return expectedRecordedName }, time.Now, promclient.QueryOptions{Deduplicate: true}, expectedHistogramModelVector(expectedRecordedName, nil, expectedRecordedHistogram, nil))
+}
+
 func writeHistograms(ctx context.Context, now time.Time, name string, histograms []*histogram.Histogram, floatHistograms []*histogram.FloatHistogram, rawRemoteWriteURL string, labels ...prompb.Label) (time.Time, error) {
 	startTime := now.Add(time.Duration(len(histograms)-1) * -30 * time.Second).Truncate(30 * time.Second)
 	prompbHistograms := make([]prompb.Histogram, 0, len(histograms))
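The new test's expected recording is the last generated histogram converted to float and doubled, because the sum() recording rule sees one matching series from each of the two receivers. In isolation, using the same Prometheus helpers the test calls (GenerateTestHistograms, ToFloat, Mul):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	hs := tsdbutil.GenerateTestHistograms(4)
	last := hs[len(hs)-1]
	// What summing one identical series per receiver yields.
	sum := last.ToFloat(nil).Mul(2)
	fmt.Println(sum.Count, sum.Sum)
}
```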
@@ -11,7 +11,6 @@ import (
 	"os"
 	"reflect"
 	"regexp"
-	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -586,36 +585,31 @@ func TestRangeQueryShardingWithRandomData(t *testing.T) {

 	ctx := context.Background()
 	timeSeries := []labels.Labels{
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "1"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "1"}, {Name: "handler", Value: "/metrics"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "2"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "2"}, {Name: "handler", Value: "/metrics"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "3"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "3"}, {Name: "handler", Value: "/metrics"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "4"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "4"}, {Name: "handler", Value: "/metrics"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "5"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "5"}, {Name: "handler", Value: "/metrics"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "6"}, {Name: "handler", Value: "/"}},
-		{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "6"}, {Name: "handler", Value: "/metrics"}},
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "1", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "1", "handler", "/metrics"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "2", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "2", "handler", "/metrics"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "3", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "3", "handler", "/metrics"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "4", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "4", "handler", "/metrics"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "5", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "5", "handler", "/metrics"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "6", "handler", "/"),
+		labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "6", "handler", "/metrics"),
 	}

-	// Ensure labels are ordered.
-	for _, ts := range timeSeries {
-		sort.Slice(ts, func(i, j int) bool {
-			return ts[i].Name < ts[j].Name
-		})
-	}
+	timeSeries = sortLabels(timeSeries)

 	samplespb := make([]prompb.TimeSeries, 0, len(timeSeries))
-	for _, labels := range timeSeries {
-		labelspb := make([]prompb.Label, 0, len(labels))
-		for _, label := range labels {
+	for _, lbls := range timeSeries {
+		labelspb := make([]prompb.Label, 0, lbls.Len())
+		lbls.Range(func(l labels.Label) {
 			labelspb = append(labelspb, prompb.Label{
-				Name:  string(label.Name),
-				Value: string(label.Value),
+				Name:  l.Name,
+				Value: l.Value,
 			})
-		}
+		})
 		samplespb = append(samplespb, prompb.TimeSeries{
 			Labels: labelspb,
 			Samples: []prompb.Sample{
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
timeSeries := []labels.Labels{
|
timeSeries := []labels.Labels{
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "1"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "1", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "1"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "1", "handler", "/metrics"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "2"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "2", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "2"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "2", "handler", "/metrics"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "3"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "3", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "3"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "3", "handler", "/metrics"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "4"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "4", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "4"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "4", "handler", "/metrics"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "5"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "5", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "5"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "5", "handler", "/metrics"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "6"}, {Name: "handler", Value: "/"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "6", "handler", "/"),
|
||||||
{{Name: labels.MetricName, Value: "http_requests_total"}, {Name: "pod", Value: "6"}, {Name: "handler", Value: "/metrics"}},
|
labels.FromStrings(labels.MetricName, "http_requests_total", "pod", "6", "handler", "/metrics"),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure labels are ordered.
|
// Ensure lbl are ordered.
|
||||||
for _, ts := range timeSeries {
|
timeSeries = sortLabels(timeSeries)
|
||||||
sort.Slice(ts, func(i, j int) bool {
|
|
||||||
return ts[i].Name < ts[j].Name
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
samplespb := make([]prompb.TimeSeries, 0, len(timeSeries))
|
samplespb := make([]prompb.TimeSeries, 0, len(timeSeries))
|
||||||
for _, labels := range timeSeries {
|
for _, lbl := range timeSeries {
|
||||||
labelspb := make([]prompb.Label, 0, len(labels))
|
labelspb := make([]prompb.Label, 0, lbl.Len())
|
||||||
for _, label := range labels {
|
lbl.Range(func(l labels.Label) {
|
||||||
labelspb = append(labelspb, prompb.Label{
|
labelspb = append(labelspb, prompb.Label{
|
||||||
Name: string(label.Name),
|
Name: l.Name,
|
||||||
Value: string(label.Value),
|
Value: l.Value,
|
||||||
})
|
})
|
||||||
}
|
})
|
||||||
samplespb = append(samplespb, prompb.TimeSeries{
|
samplespb = append(samplespb, prompb.TimeSeries{
|
||||||
Labels: labelspb,
|
Labels: labelspb,
|
||||||
Samples: []prompb.Sample{
|
Samples: []prompb.Sample{
|
||||||
|
@@ -1155,3 +1145,15 @@ func TestQueryFrontendReadyOnlyIfDownstreamIsAvailable(t *testing.T) {
 	}))

 }
+
+func sortLabels(timeSeries []labels.Labels) []labels.Labels {
+	for j, ts := range timeSeries {
+		builder := labels.NewBuilder(labels.EmptyLabels())
+		ts.Range(func(l labels.Label) {
+			builder.Set(l.Name, l.Value)
+		})
+
+		timeSeries[j] = builder.Labels()
+	}
+	return timeSeries
+}
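labels.Builder sorts (and de-duplicates) label names when Labels() is called, which is what lets the new sortLabels helper replace the old sort.Slice loops without touching the element ordering itself:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewBuilder(labels.EmptyLabels())
	b.Set("z", "1")
	b.Set("a", "2")
	// Labels() returns the entries sorted by name.
	fmt.Println(b.Labels()) // {a="2", z="1"}
}
```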
@@ -713,7 +713,7 @@ func TestQueryStoreMetrics(t *testing.T) {
 		e,
 		"s1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, minio.InternalEndpoint("http"), minio.InternalDir()),
 		},
 		"",
@@ -864,7 +864,7 @@ func TestQueryStoreDedup(t *testing.T) {
 		e,
 		"s1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, minio.InternalEndpoint("http"), minio.InternalDir()),
 		},
 		"",
@@ -1376,7 +1376,7 @@ func instantQuery(t testing.TB, ctx context.Context, addr string, q func() strin
 		"msg", fmt.Sprintf("Waiting for %d results for query %s", expectedSeriesLen, q()),
 	)

-	testutil.Ok(t, runutil.RetryWithLog(logger, 5*time.Second, ctx.Done(), func() error {
+	testutil.Ok(t, runutil.RetryWithLog(logger, 10*time.Second, ctx.Done(), func() error {
 		res, _, err := simpleInstantQuery(t, ctx, addr, q, ts, opts, expectedSeriesLen)
 		if err != nil {
 			return err
@@ -1650,7 +1650,7 @@ func remoteWriteSeriesWithLabels(ctx context.Context, prometheus *e2eobs.Observa
 	samplespb := make([]prompb.TimeSeries, 0, len(series))
 	r := rand.New(rand.NewSource(int64(len(series))))
 	for _, serie := range series {
-		labelspb := make([]prompb.Label, 0, len(serie.intLabels))
+		labelspb := make([]prompb.Label, 0, serie.intLabels.Len())
 		for labelKey, labelValue := range serie.intLabels.Map() {
 			labelspb = append(labelspb, prompb.Label{
 				Name: labelKey,
@@ -2150,7 +2150,7 @@ func TestQueryTenancyEnforcement(t *testing.T) {
 		e,
 		"s1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, minio.InternalEndpoint("http"), minio.InternalDir()),
 		},
 		"",
@@ -34,6 +34,7 @@ import (

 	"github.com/efficientgo/core/testutil"

+	"github.com/thanos-io/thanos/pkg/exemplars/exemplarspb"
 	"github.com/thanos-io/thanos/pkg/promclient"
 	"github.com/thanos-io/thanos/pkg/receive"
 	"github.com/thanos-io/thanos/pkg/runutil"
@@ -1210,7 +1211,7 @@ func TestReceiveCpnp(t *testing.T) {
 	testutil.Ok(t, err)
 	t.Cleanup(e2ethanos.CleanScenario(t, e))

-	i := e2ethanos.NewReceiveBuilder(e, "ingestor").WithIngestionEnabled().Init()
+	i := e2ethanos.NewReceiveBuilder(e, "ingestor").WithIngestionEnabled().WithExemplarsInMemStorage(100).Init()
 	testutil.Ok(t, e2e.StartAndWaitReady(i))

 	h := receive.HashringConfig{
@@ -1232,6 +1233,15 @@ func TestReceiveCpnp(t *testing.T) {
 		return storeWriteRequest(context.Background(), "http://"+r.Endpoint("remote-write")+"/api/v1/receive", &prompb.WriteRequest{
 			Timeseries: []prompb.TimeSeries{
 				{
+					Exemplars: []prompb.Exemplar{
+						{
+							Labels: []prompb.Label{
+								{Name: "receive", Value: "receive-ingestor"},
+							},
+							Value:     1.2345,
+							Timestamp: timestamp.FromTime(ts),
+						},
+					},
 					Labels: []prompb.Label{
 						{Name: model.MetricNameLabel, Value: "myself"},
 					},
@@ -1265,4 +1275,12 @@ func TestReceiveCpnp(t *testing.T) {
 		},
 	}, v)

+	// TODO(GiedriusS): repro for https://github.com/thanos-io/thanos/issues/8224. Fix in following PRs.
+	queryExemplars(
+		t, context.Background(), q.Endpoint("http"), "myself", timestamp.FromTime(ts), timestamp.FromTime(ts), func(data []*exemplarspb.ExemplarData) error {
+			require.Equal(t, "\000\000\000\000\000\000\000", data[0].Exemplars[0].Labels.Labels[0].Name)
+			return nil
+		},
+	)
 }
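Note that the assertion deliberately expects a corrupted label name of seven NUL bytes: it documents the bug tracked in issue 8224 rather than the desired behavior. For readers unused to Go's octal escapes, the literal decodes as:

```go
package main

import "fmt"

func main() {
	// "\000" is an octal escape for a single NUL byte; seven of them.
	name := "\000\000\000\000\000\000\000"
	fmt.Println(len(name))   // 7
	fmt.Printf("%q\n", name) // "\x00\x00\x00\x00\x00\x00\x00"
}
```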
@@ -40,6 +40,8 @@ groups:
 - name: example_abort
   interval: 1s
   # Abort should be a default: partial_response_strategy: "ABORT"
+  labels:
+    foo: bar
   rules:
   - alert: TestAlert_AbortOnPartialResponse
     # It must be based on actual metrics otherwise call to StoreAPI would be not involved.
@@ -501,6 +503,7 @@ func TestRule(t *testing.T) {
 			"__name__":   "ALERTS",
 			"severity":   "page",
 			"alertname":  "TestAlert_AbortOnPartialResponse",
+			"foo":        "bar",
 			"alertstate": "firing",
 			"replica":    "1",
 		},
@@ -521,11 +524,6 @@ func TestRule(t *testing.T) {
 	})

 	expAlertLabels := []model.LabelSet{
-		{
-			"severity":  "page",
-			"alertname": "TestAlert_AbortOnPartialResponse",
-			"replica":   "1",
-		},
 		{
 			"severity":  "page",
 			"alertname": "TestAlert_HasBeenLoadedViaWebHandler",
@@ -536,6 +534,12 @@ func TestRule(t *testing.T) {
 			"alertname": "TestAlert_WarnOnPartialResponse",
 			"replica":   "1",
 		},
+		{
+			"severity":  "page",
+			"foo":       "bar",
+			"alertname": "TestAlert_AbortOnPartialResponse",
+			"replica":   "1",
+		},
 	}

 	alrts, err := promclient.NewDefaultClient().AlertmanagerAlerts(ctx, urlParse(t, "http://"+am2.Endpoint("http")))
@@ -102,6 +102,7 @@ func TestRulesAPI_Fanout(t *testing.T) {
 		State: rulespb.AlertState_FIRING,
 		Query: "absent(some_metric)",
 		Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{
+			{Name: "foo", Value: "bar"},
 			{Name: "prometheus", Value: "ha"},
 			{Name: "severity", Value: "page"},
 		}},
@@ -118,6 +119,7 @@ func TestRulesAPI_Fanout(t *testing.T) {
 		State: rulespb.AlertState_FIRING,
 		Query: "absent(some_metric)",
 		Labels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{
+			{Name: "foo", Value: "bar"},
 			{Name: "severity", Value: "page"},
 		}},
 		Health: string(rules.HealthGood),
@ -77,7 +77,7 @@ metafile_content_ttl: 0s`, memcached.InternalEndpoint("memcached"))
|
||||||
e,
|
e,
|
||||||
"1",
|
"1",
|
||||||
client.BucketConfig{
|
client.BucketConfig{
|
||||||
Type: client.S3,
|
Type: objstore.S3,
|
||||||
Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
|
Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
|
||||||
},
|
},
|
||||||
memcachedConfig,
|
memcachedConfig,
|
||||||
|
@@ -413,7 +413,7 @@ func TestStoreGatewayNoCacheFile(t *testing.T) {
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",

@@ -645,7 +645,7 @@ blocks_iter_ttl: 0s`, memcached.InternalEndpoint("memcached"))
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		memcachedConfig,

@@ -754,7 +754,7 @@ metafile_content_ttl: 0s`
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		fmt.Sprintf(groupcacheConfig, 1),

@@ -765,7 +765,7 @@ metafile_content_ttl: 0s`
 		e,
 		"2",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		fmt.Sprintf(groupcacheConfig, 2),

@@ -776,7 +776,7 @@ metafile_content_ttl: 0s`
 		e,
 		"3",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		fmt.Sprintf(groupcacheConfig, 3),

@@ -873,7 +873,7 @@ config:
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		string(cacheCfg),

@@ -885,7 +885,7 @@ config:
 		e,
 		"2",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		string(cacheCfg),

@@ -896,7 +896,7 @@ config:
 		e,
 		"3",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		string(cacheCfg),

@@ -1041,7 +1041,7 @@ config:
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",

@@ -1137,7 +1137,7 @@ func TestStoreGatewayLazyExpandedPostingsEnabled(t *testing.T) {
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",

@@ -1148,7 +1148,7 @@ func TestStoreGatewayLazyExpandedPostingsEnabled(t *testing.T) {
 		e,
 		"2",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",

@@ -1294,7 +1294,7 @@ func TestStoreGatewayLazyExpandedPostingsPromQLSmithFuzz(t *testing.T) {
 		e,
 		"1",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",

@@ -1305,7 +1305,7 @@ func TestStoreGatewayLazyExpandedPostingsPromQLSmithFuzz(t *testing.T) {
 		e,
 		"2",
 		client.BucketConfig{
-			Type:   client.S3,
+			Type:   objstore.S3,
 			Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 		},
 		"",
@@ -1339,12 +1339,7 @@ func TestStoreGatewayLazyExpandedPostingsPromQLSmithFuzz(t *testing.T) {
 	statusCodes := []string{"200", "400", "404", "500", "502"}
 	extLset := labels.FromStrings("ext1", "value1", "replica", "1")
 	for i := 0; i < numSeries; i++ {
-		lbl := labels.Labels{
-			{Name: labels.MetricName, Value: metricName},
-			{Name: "job", Value: "test"},
-			{Name: "series", Value: strconv.Itoa(i % 200)},
-			{Name: "status_code", Value: statusCodes[i%5]},
-		}
+		lbl := labels.FromStrings(labels.MetricName, metricName, "job", "test", "series", strconv.Itoa(i%200), "status_code", statusCodes[i%5])
 		lbls = append(lbls, lbl)
 	}
 	id, err := e2eutil.CreateBlockWithChurn(ctx, rnd, dir, lbls, numSamples, startMs, endMs, extLset, 0, scrapeInterval, 10)
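This hunk replaces a `labels.Labels` composite literal with the `labels.FromStrings` constructor. A small self-contained sketch of the same pattern, assuming the `github.com/prometheus/prometheus/model/labels` package; the metric name and loop bound here are illustrative, not the test's actual values:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	metricName := "http_requests_total" // hypothetical; the test derives its own name
	statusCodes := []string{"200", "400", "404", "500", "502"}

	var lbls []labels.Labels
	for i := 0; i < 10; i++ {
		// labels.FromStrings takes name/value pairs, so it stays valid no
		// matter how labels.Labels is represented internally, unlike the old
		// composite literal, which assumes a slice-backed type.
		lbl := labels.FromStrings(
			labels.MetricName, metricName,
			"job", "test",
			"series", strconv.Itoa(i%200),
			"status_code", statusCodes[i%5],
		)
		lbls = append(lbls, lbl)
	}
	fmt.Println(len(lbls), lbls[0])
}
```

Going through the constructor keeps the test code independent of the build-time choice of label representation.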
@@ -48,7 +48,7 @@ func TestToolsBucketWebExternalPrefixWithoutReverseProxy(t *testing.T) {
 	testutil.Ok(t, e2e.StartAndWaitReady(m))

 	svcConfig := client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.InternalDir()),
 	}

@@ -82,7 +82,7 @@ func TestToolsBucketWebExternalPrefix(t *testing.T) {
 	testutil.Ok(t, e2e.StartAndWaitReady(m))

 	svcConfig := client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.InternalDir()),
 	}

@@ -123,7 +123,7 @@ func TestToolsBucketWebExternalPrefixAndRoutePrefix(t *testing.T) {
 	testutil.Ok(t, e2e.StartAndWaitReady(m))

 	svcConfig := client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.Endpoint("http"), m.InternalDir()),
 	}

@@ -204,7 +204,7 @@ func TestToolsBucketWebWithTimeAndRelabelFilter(t *testing.T) {
 	}
 	// Start thanos tool bucket web.
 	svcConfig := client.BucketConfig{
-		Type:   client.S3,
+		Type:   objstore.S3,
 		Config: e2ethanos.NewS3Config(bucket, m.InternalEndpoint("http"), m.InternalDir()),
 	}
 	b := e2ethanos.NewToolsBucketWeb(