Compare commits
No commits in common. "main" and "flagd-proxy/v0.6.6" have entirely different histories.
main
...
flagd-prox
|
@ -15,6 +15,9 @@ on:
|
|||
- "README.md"
|
||||
- "docs/**"
|
||||
|
||||
env:
|
||||
GO_VERSION: '~1.21'
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -27,7 +30,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make lint
|
||||
|
||||
|
@ -39,7 +42,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make generate-docs
|
||||
- name: Check no diff
|
||||
|
@ -57,7 +60,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
- run: make workspace-init
|
||||
- run: make test
|
||||
- name: Upload coverage to Codecov
|
||||
|
@ -75,7 +78,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@master
|
||||
|
@ -95,15 +98,13 @@ jobs:
|
|||
tags: flagd-local:test
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@0.28.0
|
||||
uses: aquasecurity/trivy-action@master
|
||||
with:
|
||||
input: ${{ github.workspace }}/flagd-local.tar
|
||||
format: "sarif"
|
||||
input: /github/workspace/flagd-local.tar
|
||||
format: "template"
|
||||
template: "@/contrib/sarif.tpl"
|
||||
output: "trivy-results.sarif"
|
||||
severity: "CRITICAL,HIGH"
|
||||
env:
|
||||
# use an alternative trivvy db to avoid rate limits
|
||||
TRIVY_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3
|
||||
|
@ -122,15 +123,7 @@ jobs:
|
|||
- name: Setup go
|
||||
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5
|
||||
with:
|
||||
go-version-file: 'flagd/go.mod'
|
||||
|
||||
- name: Install envoy
|
||||
run: |
|
||||
wget -O- https://apt.envoyproxy.io/signing.key | sudo gpg --dearmor -o /etc/apt/keyrings/envoy-keyring.gpg
|
||||
echo "deb [signed-by=/etc/apt/keyrings/envoy-keyring.gpg] https://apt.envoyproxy.io jammy main" | sudo tee /etc/apt/sources.list.d/envoy.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install envoy
|
||||
envoy --version
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Workspace init
|
||||
run: make workspace-init
|
||||
|
@ -147,9 +140,5 @@ jobs:
|
|||
-f file:${{ github.workspace }}/test-harness/flags/zero-flags.json \
|
||||
-f file:${{ github.workspace }}/test-harness/flags/edge-case-flags.json &
|
||||
|
||||
- name: Run envoy proxy in background
|
||||
run: |
|
||||
envoy -c ./test/integration/config/envoy.yaml &
|
||||
|
||||
- name: Run evaluation test suite
|
||||
run: go clean -testcache && go test -cover ./test/integration
|
||||
|
|
|
@ -63,7 +63,6 @@ jobs:
|
|||
container-release:
|
||||
name: Build and push containers to GHCR
|
||||
needs: release-please
|
||||
environment: publish
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ needs.release-please.outputs.items_to_publish != '' && toJson(fromJson(needs.release-please.outputs.items_to_publish)) != '[]' }}
|
||||
strategy:
|
||||
|
@ -109,8 +108,6 @@ jobs:
|
|||
context: .
|
||||
file: ./${{ matrix.path }}/build.Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
provenance: mode=max
|
||||
sbom: true
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/${{ matrix.path }}:latest
|
||||
|
@ -131,12 +128,24 @@ jobs:
|
|||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
|
||||
- name: Generate image SBOM file name
|
||||
id: image-sbom-file-gen
|
||||
run: echo "IMG_SBOM_FILE=${{ format('{0}-{1}-sbom.spdx', matrix.path, env.VERSION) }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: SBOM for latest image
|
||||
uses: anchore/sbom-action@b6a39da80722a2cb0ef5d197531764a89b5d48c3 # v0
|
||||
with:
|
||||
image: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/${{ matrix.path }}:${{ env.VERSION }}
|
||||
artifact-name: ${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
output-file: ${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
|
||||
- name: Bundle release assets
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
|
||||
with:
|
||||
tag_name: ${{ env.TAG }}
|
||||
files: |
|
||||
${{ env.PUBLIC_KEY_FILE }}
|
||||
${{ steps.image-sbom-file-gen.outputs.IMG_SBOM_FILE }}
|
||||
|
||||
release-go-binaries:
|
||||
name: Create and publish binaries to GitHub
|
||||
|
@ -202,6 +211,19 @@ jobs:
|
|||
run: |
|
||||
env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build ${{ env.BUILD_ARGS }} -o ./${{ matrix.path }}_windows_i386 ./${{ matrix.path }}/main.go
|
||||
zip -r ${{ matrix.path }}_${{ env.VERSION_NO_PREFIX }}_Windows_i386.zip ./${{ matrix.path }}_windows_i386 ./LICENSE ./CHANGELOG.md ./README.md ./sbom.xml
|
||||
# Bundle licenses
|
||||
- name: Install go-licenses
|
||||
run: go install github.com/google/go-licenses@latest
|
||||
- name: Build license extraction locations
|
||||
id: license-files
|
||||
run: |
|
||||
echo "LICENSE_FOLDER=${{ format('{0}-third-party-license', matrix.path) }}" >> $GITHUB_OUTPUT
|
||||
echo "LICENSE_ERROR_FILE=${{ format('{0}-license-errors.txt', matrix.path) }}" >> $GITHUB_OUTPUT
|
||||
- name: Run go-licenses for module ${{ matrix.path }}
|
||||
run: go-licenses save ./${{ matrix.path }} --save_path=./${{ steps.license-files.outputs.LICENSE_FOLDER }} --force --logtostderr=false 2> ./${{ steps.license-files.outputs.LICENSE_ERROR_FILE }}
|
||||
continue-on-error: true # tool set stderr which can be ignored and referred through error artefact
|
||||
- name: Bundle license extracts
|
||||
run: tar czf ./${{ steps.license-files.outputs.LICENSE_FOLDER }}.tar.gz ./${{ steps.license-files.outputs.LICENSE_FOLDER }}
|
||||
# Bundle release artifacts
|
||||
- name: Bundle release assets
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
|
||||
|
@ -211,6 +233,7 @@ jobs:
|
|||
./sbom.xml
|
||||
./*.tar.gz
|
||||
./*.zip
|
||||
./${{ steps.license-files.outputs.LICENSE_ERROR_FILE }}
|
||||
homebrew:
|
||||
name: Bump homebrew-core formula
|
||||
needs: release-please
|
||||
|
|
|
@ -21,6 +21,3 @@ site
|
|||
|
||||
# coverage results
|
||||
*coverage.out
|
||||
|
||||
# benchmark results
|
||||
benchmark.txt
|
|
@ -6,4 +6,4 @@
|
|||
url = https://github.com/open-feature/spec.git
|
||||
[submodule "schemas"]
|
||||
path = schemas
|
||||
url = https://github.com/open-feature/flagd-schemas.git
|
||||
url = https://github.com/open-feature/schemas.git
|
||||
|
|
|
@ -1,30 +0,0 @@
|
|||
run:
|
||||
timeout: 3m
|
||||
linters-settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
golint:
|
||||
min-confidence: 0.6
|
||||
enable-all: true
|
||||
issues:
|
||||
exclude:
|
||||
- pkg/generated
|
||||
exclude-rules:
|
||||
- path: _test.go
|
||||
linters:
|
||||
- funlen
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- bodyclose
|
||||
- lll
|
||||
- goconst
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- dupl
|
||||
- staticcheck
|
||||
exclude-dirs:
|
||||
- (^|/)bin($|/)
|
||||
- (^|/)examples($|/)
|
||||
- (^|/)schemas($|/)
|
||||
- (^|/)test-harness($|/)
|
102
.golangci.yml
102
.golangci.yml
|
@ -1,43 +1,67 @@
|
|||
version: "2"
|
||||
linters:
|
||||
settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- bodyclose
|
||||
- dupl
|
||||
- funlen
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocyclo
|
||||
- lll
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- staticcheck
|
||||
path: _test.go
|
||||
- path: (.+)\.go$
|
||||
text: pkg/generated
|
||||
paths:
|
||||
run:
|
||||
skip-dirs:
|
||||
- (^|/)bin($|/)
|
||||
- (^|/)examples($|/)
|
||||
- (^|/)schemas($|/)
|
||||
- (^|/)test-harness($|/)
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
linters:
|
||||
enable:
|
||||
- asciicheck
|
||||
- asasalint
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- contextcheck
|
||||
- dogsled
|
||||
- dupl
|
||||
- dupword
|
||||
- durationcheck
|
||||
- errchkjson
|
||||
- exhaustive
|
||||
- funlen
|
||||
- gci
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- interfacebloat
|
||||
- gosec
|
||||
- lll
|
||||
- misspell
|
||||
- nakedret
|
||||
- nilerr
|
||||
- nilnil
|
||||
- noctx
|
||||
- nosprintfhostport
|
||||
- prealloc
|
||||
- promlinter
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- exportloopref
|
||||
- stylecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- whitespace
|
||||
- wrapcheck
|
||||
- gofumpt
|
||||
- tenv
|
||||
linters-settings:
|
||||
funlen:
|
||||
statements: 50
|
||||
golint:
|
||||
min-confidence: 0.6
|
||||
issues:
|
||||
exclude:
|
||||
- pkg/generated
|
||||
exclude-rules:
|
||||
- path: _test.go
|
||||
linters:
|
||||
- funlen
|
||||
- maligned
|
||||
- noctx
|
||||
- scopelint
|
||||
- bodyclose
|
||||
- lll
|
||||
- goconst
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- dupl
|
||||
- staticcheck
|
||||
|
|
|
@ -13,9 +13,6 @@ config:
|
|||
max-one-sentence-per-line: true
|
||||
code-block-style: false # not compatible with mkdocs "details" panes
|
||||
no-alt-text: false
|
||||
descriptive-link-text: false
|
||||
MD007:
|
||||
indent: 4
|
||||
|
||||
ignores:
|
||||
- "**/CHANGELOG.md"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"flagd": "0.12.9",
|
||||
"flagd-proxy": "0.8.0",
|
||||
"core": "0.12.1"
|
||||
"flagd": "0.11.3",
|
||||
"flagd-proxy": "0.6.6",
|
||||
"core": "0.10.3"
|
||||
}
|
|
@ -3,4 +3,4 @@
|
|||
#
|
||||
# Managed by Peribolos: https://github.com/open-feature/community/blob/main/config/open-feature/cloud-native/workgroup.yaml
|
||||
#
|
||||
* @open-feature/flagd-maintainers @open-feature/maintainers
|
||||
* @open-feature/cloud-native-maintainers
|
||||
|
|
|
@ -8,24 +8,6 @@ TLDR: be respectful.
|
|||
Any contributions are expected to include unit tests.
|
||||
These can be validated with `make test` or the automated github workflow will run them on PR creation.
|
||||
|
||||
## Development
|
||||
|
||||
### Prerequisites
|
||||
|
||||
You'll need:
|
||||
|
||||
- Go
|
||||
- make
|
||||
- docker
|
||||
|
||||
You'll want:
|
||||
|
||||
- curl (for calling HTTP endpoints)
|
||||
- [grpcurl](https://github.com/fullstorydev/grpcurl) (for making gRPC calls)
|
||||
- jq (for pretty printing responses)
|
||||
|
||||
### Workspace Initialization
|
||||
|
||||
This project uses a go workspace, to setup the project run
|
||||
|
||||
```shell
|
||||
|
@ -40,70 +22,6 @@ The project uses remote buf packages, changing the remote generation source will
|
|||
export GOPRIVATE=buf.build/gen/go
|
||||
```
|
||||
|
||||
### Manual testing
|
||||
|
||||
flagd has a number of interfaces (you can read more about them at [flagd.dev](https://flagd.dev/)) which can be used to evaluate flags, or deliver flag configurations so that they can be evaluated by _in-process_ providers.
|
||||
|
||||
You can manually test this functionality by starting flagd (from the flagd/ directory) with `go run main.go start -f file:../config/samples/example_flags.flagd.json`.
|
||||
|
||||
NOTE: you will need `go, curl`
|
||||
|
||||
#### Remote single flag evaluation via HTTP1.1/Connect
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
curl -X POST -d '{"flagKey":"myBoolFlag","context":{}}' -H "Content-Type: application/json" "http://localhost:8013/flagd.evaluation.v1.Service/ResolveBoolean" | jq
|
||||
```
|
||||
|
||||
#### Remote single flag evaluation via HTTP1.1/OFREP
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
curl -X POST -d '{"context":{}}' 'http://localhost:8016/ofrep/v1/evaluate/flags/myBoolFlag' | jq
|
||||
```
|
||||
|
||||
#### Remote single flag evaluation via gRPC
|
||||
|
||||
```sh
|
||||
# evaluates a single boolean flag
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{"flagKey":"myBoolFlag"}' localhost:8013 flagd.evaluation.v1.Service/ResolveBoolean | jq
|
||||
```
|
||||
|
||||
#### Remote bulk evaluation via HTTP1.1/OFREP
|
||||
|
||||
```sh
|
||||
# evaluates flags in bulk
|
||||
curl -X POST -d '{"context":{}}' 'http://localhost:8016/ofrep/v1/evaluate/flags' | jq
|
||||
```
|
||||
|
||||
#### Remote bulk evaluation via gRPC
|
||||
|
||||
```sh
|
||||
# evaluates flags in bulk
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{}' localhost:8013 flagd.evaluation.v1.Service/ResolveAll | jq
|
||||
```
|
||||
|
||||
#### Remote event streaming via gRPC
|
||||
|
||||
```sh
|
||||
# notifies of flag changes (but does not evaluate)
|
||||
grpcurl -import-path schemas/protobuf/flagd/evaluation/v1/ -proto evaluation.proto -plaintext -d '{}' localhost:8013 flagd.evaluation.v1.Service/EventStream
|
||||
```
|
||||
|
||||
#### Flag configuration fetch via gRPC
|
||||
|
||||
```sh
|
||||
# sends back a representation of all flags
|
||||
grpcurl -import-path schemas/protobuf/flagd/sync/v1/ -proto sync.proto -plaintext localhost:8015 flagd.sync.v1.FlagSyncService/FetchAllFlags | jq
|
||||
```
|
||||
|
||||
#### Flag synchronization stream via gRPC
|
||||
|
||||
```sh
|
||||
# will open a persistent stream which sends flag changes when the watched source is modified
|
||||
grpcurl -import-path schemas/protobuf/flagd/sync/v1/ -proto sync.proto -plaintext localhost:8015 flagd.sync.v1.FlagSyncService/SyncFlags | jq
|
||||
```
|
||||
|
||||
## DCO Sign-Off
|
||||
|
||||
A DCO (Developer Certificate of Origin) sign-off is a line placed at the end of
|
||||
|
|
22
Makefile
22
Makefile
|
@ -47,19 +47,12 @@ test-flagd:
|
|||
go test -race -covermode=atomic -cover -short ./flagd/pkg/... -coverprofile=flagd-coverage.out
|
||||
test-flagd-proxy:
|
||||
go test -race -covermode=atomic -cover -short ./flagd-proxy/pkg/... -coverprofile=flagd-proxy-coverage.out
|
||||
flagd-benchmark-test:
|
||||
go test -bench=Bench -short -benchtime=5s -benchmem ./core/... | tee benchmark.txt
|
||||
flagd-integration-test-harness:
|
||||
# target used to start a locally built flagd with the e2e flags
|
||||
cd flagd; go run main.go start -f file:../test-harness/flags/testing-flags.json -f file:../test-harness/flags/custom-ops.json -f file:../test-harness/flags/evaluator-refs.json -f file:../test-harness/flags/zero-flags.json -f file:../test-harness/flags/edge-case-flags.json
|
||||
flagd-integration-test: # dependent on flagd-e2e-test-harness if not running in github actions
|
||||
go test -count=1 -cover ./test/integration $(ARGS)
|
||||
flagd-integration-test: # dependent on ./bin/flagd start -f file:test-harness/flags/testing-flags.json -f file:test-harness/flags/custom-ops.json -f file:test-harness/flags/evaluator-refs.json -f file:test-harness/flags/zero-flags.json
|
||||
go test -cover ./test/integration $(ARGS)
|
||||
run: # default to flagd
|
||||
make run-flagd
|
||||
run-flagd:
|
||||
cd flagd; go run main.go start -f file:../config/samples/example_flags.flagd.json
|
||||
run-flagd-selector-demo:
|
||||
cd flagd; go run main.go start -f file:../config/samples/example_flags.flagd.json -f file:../config/samples/example_flags.flagd.2.json
|
||||
install:
|
||||
cp systemd/flagd.service /etc/systemd/system/flagd.service
|
||||
mkdir -p /etc/flagd
|
||||
|
@ -73,11 +66,8 @@ uninstall:
|
|||
rm /etc/systemd/system/flagd.service
|
||||
rm -f $(DESTDIR)$(PREFIX)/bin/flagd
|
||||
lint:
|
||||
go install -v github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.2.1
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run $(module)/...;)
|
||||
lint-fix:
|
||||
go install -v github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.2.1
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run --fix $(module)/...;)
|
||||
go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2
|
||||
$(foreach module, $(ALL_GO_MOD_DIRS), ${GOPATH}/bin/golangci-lint run --deadline=5m --timeout=5m $(module)/... || exit;)
|
||||
install-mockgen:
|
||||
go install go.uber.org/mock/mockgen@v0.4.0
|
||||
mockgen: install-mockgen
|
||||
|
@ -144,8 +134,8 @@ update-public-schema: pull-schemas-submodule
|
|||
|
||||
.PHONY: run-web-docs
|
||||
run-web-docs: generate-docs generate-proto-docs
|
||||
docker build -t flag-docs:latest . --load \
|
||||
&& docker run --rm -it -p 8000:8000 -v ${PWD}:/docs flag-docs:latest
|
||||
docker build -t squidfunk/mkdocs-material . \
|
||||
&& docker run --rm -it -p 8000:8000 -v ${PWD}:/docs squidfunk/mkdocs-material
|
||||
|
||||
# Run the playground app in dev mode
|
||||
# See the readme in the playground-app folder for more details
|
||||
|
|
|
@ -1,72 +0,0 @@
|
|||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/certreloader 15.986s
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/open-feature/flagd/core/pkg/evaluator
|
||||
cpu: 11th Gen Intel(R) Core(TM) i9-11950H @ 2.60GHz
|
||||
BenchmarkFractionalEvaluation/test_a@faas.com-16 423930 13316 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_b@faas.com-16 469594 13677 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_c@faas.com-16 569103 13286 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkFractionalEvaluation/test_d@faas.com-16 412386 13023 ns/op 7229 B/op 135 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_staticBoolFlag-16 3106903 1792 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_targetingBoolFlag-16 448164 11250 ns/op 6065 B/op 87 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_staticObjectFlag-16 3958750 1476 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_missingFlag-16 5331808 1353 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveBooleanValue/test_disabledFlag-16 4530751 1301 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveStringValue/test_staticStringFlag-16 4583056 1525 ns/op 1040 B/op 13 allocs/op
|
||||
BenchmarkResolveStringValue/test_targetingStringFlag-16 839954 10388 ns/op 6097 B/op 89 allocs/op
|
||||
BenchmarkResolveStringValue/test_staticObjectFlag-16 4252830 1677 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveStringValue/test_missingFlag-16 3743324 1495 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveStringValue/test_disabledFlag-16 3495699 1709 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_staticFloatFlag-16 4382868 1511 ns/op 1024 B/op 13 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_targetingFloatFlag-16 867987 10344 ns/op 6081 B/op 89 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_staticObjectFlag-16 3913120 1695 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_missingFlag-16 3910468 1349 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveFloatValue/test:_disabledFlag-16 3642919 1666 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveIntValue/test_staticIntFlag-16 4077288 1349 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveIntValue/test_targetingNumberFlag-16 922383 7601 ns/op 6065 B/op 87 allocs/op
|
||||
BenchmarkResolveIntValue/test_staticObjectFlag-16 4995128 1229 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveIntValue/test_missingFlag-16 5574153 1274 ns/op 768 B/op 12 allocs/op
|
||||
BenchmarkResolveIntValue/test_disabledFlag-16 3633708 1734 ns/op 1072 B/op 13 allocs/op
|
||||
BenchmarkResolveObjectValue/test_staticObjectFlag-16 1624102 4559 ns/op 2243 B/op 37 allocs/op
|
||||
BenchmarkResolveObjectValue/test_targetingObjectFlag-16 443880 11995 ns/op 7283 B/op 109 allocs/op
|
||||
BenchmarkResolveObjectValue/test_staticBoolFlag-16 3462445 1665 ns/op 1008 B/op 11 allocs/op
|
||||
BenchmarkResolveObjectValue/test_missingFlag-16 4207567 1458 ns/op 784 B/op 12 allocs/op
|
||||
BenchmarkResolveObjectValue/test_disabledFlag-16 3407262 1848 ns/op 1072 B/op 13 allocs/op
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/evaluator 239.506s
|
||||
? github.com/open-feature/flagd/core/pkg/evaluator/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/logger 0.003s
|
||||
? github.com/open-feature/flagd/core/pkg/model [no test files]
|
||||
? github.com/open-feature/flagd/core/pkg/service [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/service/ofrep 0.002s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/store 0.003s
|
||||
? github.com/open-feature/flagd/core/pkg/sync [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/blob 0.016s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/builder 0.018s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/builder/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/file 1.007s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc 8.011s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc/credentials 0.008s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/grpc/credentials/mock [no test files]
|
||||
? github.com/open-feature/flagd/core/pkg/sync/grpc/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/grpc/nameresolvers 0.002s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/http 4.006s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/http/mock [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/sync/kubernetes 0.016s
|
||||
? github.com/open-feature/flagd/core/pkg/sync/testing [no test files]
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/telemetry 0.016s
|
||||
PASS
|
||||
ok github.com/open-feature/flagd/core/pkg/utils 0.002s
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"flagSetId": "other",
|
||||
"version": "v1"
|
||||
},
|
||||
"flags": {
|
||||
"myStringFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"dupe1": "dupe1",
|
||||
"dupe2": "dupe2"
|
||||
},
|
||||
"defaultVariant": "dupe1"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,9 +1,5 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"flagSetId": "example",
|
||||
"version": "v1"
|
||||
},
|
||||
"flags": {
|
||||
"myBoolFlag": {
|
||||
"state": "ENABLED",
|
||||
|
@ -11,10 +7,7 @@
|
|||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": "on",
|
||||
"metadata": {
|
||||
"version": "v2"
|
||||
}
|
||||
"defaultVariant": "on"
|
||||
},
|
||||
"myStringFlag": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
{
|
||||
"$schema": "https://flagd.dev/schema/v0/flags.json",
|
||||
"metadata": {
|
||||
"version": "v2"
|
||||
},
|
||||
"flags": {
|
||||
"myBoolFlag": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -1,234 +1,5 @@
|
|||
# Changelog
|
||||
|
||||
## [0.12.1](https://github.com/open-feature/flagd/compare/core/v0.12.0...core/v0.12.1) (2025-07-28)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* add back file-delete test ([#1694](https://github.com/open-feature/flagd/issues/1694)) ([750aa17](https://github.com/open-feature/flagd/commit/750aa176b5a8dd24a9daaff985ff6efeb084c758))
|
||||
* fix benchmark ([#1698](https://github.com/open-feature/flagd/issues/1698)) ([5e2d7d7](https://github.com/open-feature/flagd/commit/5e2d7d7176ba05e667cd92acd7decb531a8de2f6))
|
||||
|
||||
## [0.12.0](https://github.com/open-feature/flagd/compare/core/v0.11.8...core/v0.12.0) (2025-07-21)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691))
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* update to latest otel semconv ([#1668](https://github.com/open-feature/flagd/issues/1668)) ([81855d7](https://github.com/open-feature/flagd/commit/81855d76f94a09251a19a05f830cc1d11ab6b566))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* Add support for HTTP eTag header and 304 no change response ([#1645](https://github.com/open-feature/flagd/issues/1645)) ([ea3be4f](https://github.com/open-feature/flagd/commit/ea3be4f9010644132795bb60b36fb7705f901b62))
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691)) ([ac647e0](https://github.com/open-feature/flagd/commit/ac647e065636071f5bc065a9a084461cea692166))
|
||||
|
||||
## [0.11.8](https://github.com/open-feature/flagd/compare/core/v0.11.7...core/v0.11.8) (2025-07-15)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 08b4c52 ([#1682](https://github.com/open-feature/flagd/issues/1682)) ([68d04e2](https://github.com/open-feature/flagd/commit/68d04e21e63c63d6054fcd6aebfb864e8b3a597e))
|
||||
|
||||
## [0.11.7](https://github.com/open-feature/flagd/compare/core/v0.11.6...core/v0.11.7) (2025-07-15)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* general err if targeting variant not in variants ([#1680](https://github.com/open-feature/flagd/issues/1680)) ([6cabfc8](https://github.com/open-feature/flagd/commit/6cabfc8ff3bd4ad69699a72724495e84cdec0cc3))
|
||||
|
||||
## [0.11.6](https://github.com/open-feature/flagd/compare/core/v0.11.5...core/v0.11.6) (2025-07-10)
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add sync_context to SyncFlags ([#1642](https://github.com/open-feature/flagd/issues/1642)) ([07a45d9](https://github.com/open-feature/flagd/commit/07a45d9b2275584fa92ff33cbe5e5c7d7864db38))
|
||||
* allowing null/missing defaultValue ([#1659](https://github.com/open-feature/flagd/issues/1659)) ([3f6b78c](https://github.com/open-feature/flagd/commit/3f6b78c8ccab75e9c07d26741c4b206fd0b722ee))
|
||||
|
||||
## [0.11.5](https://github.com/open-feature/flagd/compare/core/v0.11.4...core/v0.11.5) (2025-06-13)
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add server-side deadline to sync service ([#1638](https://github.com/open-feature/flagd/issues/1638)) ([b70fa06](https://github.com/open-feature/flagd/commit/b70fa06b66e1fe8a28728441a7ccd28c6fe6a0c6))
|
||||
* updating context using headers ([#1641](https://github.com/open-feature/flagd/issues/1641)) ([ba34815](https://github.com/open-feature/flagd/commit/ba348152b6e7b6bd7473bb11846aac7db316c88e))
|
||||
|
||||
## [0.11.4](https://github.com/open-feature/flagd/compare/core/v0.11.3...core/v0.11.4) (2025-05-28)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* incorrect comparison used for time ([#1608](https://github.com/open-feature/flagd/issues/1608)) ([8c5ac2f](https://github.com/open-feature/flagd/commit/8c5ac2f2c31e092cbe6ddb4d3c1adeeeb04e9ef9))
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update dependency go to v1.24.1 ([#1559](https://github.com/open-feature/flagd/issues/1559)) ([cd46044](https://github.com/open-feature/flagd/commit/cd4604471bba0a1df67bf87653a38df3caf9d20f))
|
||||
* **security:** upgrade dependency versions ([#1632](https://github.com/open-feature/flagd/issues/1632)) ([761d870](https://github.com/open-feature/flagd/commit/761d870a3c563b8eb1b83ee543b41316c98a1d48))
|
||||
|
||||
|
||||
### 🔄 Refactoring
|
||||
|
||||
* Refactor the cron function in http sync ([#1600](https://github.com/open-feature/flagd/issues/1600)) ([babcacf](https://github.com/open-feature/flagd/commit/babcacfe4dd1244dda954823d8a3ed2019c8752b))
|
||||
* removed hardcoded metric export interval and use otel default ([#1621](https://github.com/open-feature/flagd/issues/1621)) ([81c66eb](https://github.com/open-feature/flagd/commit/81c66ebf2b82fc6874ab325569f52801d5ab8e5e))
|
||||
|
||||
## [0.11.3](https://github.com/open-feature/flagd/compare/core/v0.11.2...core/v0.11.3) (2025-03-25)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 9b0ee43 ([#1598](https://github.com/open-feature/flagd/issues/1598)) ([0587ce4](https://github.com/open-feature/flagd/commit/0587ce44e60b643ff6960c1eaf4461f933ea95b7))
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to e840a03 ([#1587](https://github.com/open-feature/flagd/issues/1587)) ([9ee0c57](https://github.com/open-feature/flagd/commit/9ee0c573d6dbfa0c4e9b18c9da7313094ea56916))
|
||||
* **deps:** update module connectrpc.com/otelconnect to v0.7.2 ([#1574](https://github.com/open-feature/flagd/issues/1574)) ([6094dce](https://github.com/open-feature/flagd/commit/6094dce5c0472f593b79d6d40e080f9b8d6503e5))
|
||||
* **deps:** update module github.com/google/go-cmp to v0.7.0 ([#1569](https://github.com/open-feature/flagd/issues/1569)) ([6e9dbd2](https://github.com/open-feature/flagd/commit/6e9dbd2dbf8365f839e353f53cb638847a1f05d6))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.1 ([#1576](https://github.com/open-feature/flagd/issues/1576)) ([cd95193](https://github.com/open-feature/flagd/commit/cd95193f71fd465ffd1b177fa492aa84d8414a87))
|
||||
* **deps:** update module google.golang.org/grpc to v1.71.0 ([#1578](https://github.com/open-feature/flagd/issues/1578)) ([5c2c64f](https://github.com/open-feature/flagd/commit/5c2c64f878b8603dd37cbfd79b0e1588e4b5a3c6))
|
||||
* incorrect metadata returned per source ([#1599](https://github.com/open-feature/flagd/issues/1599)) ([b333e11](https://github.com/open-feature/flagd/commit/b333e11ecfe54f72c44ee61b3dcb1f2a487c94d4))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* accept version numbers which are not strings ([#1589](https://github.com/open-feature/flagd/issues/1589)) ([6a13796](https://github.com/open-feature/flagd/commit/6a137967a258e799cbac9e3bb3927a07412c2a7b))
|
||||
|
||||
## [0.11.2](https://github.com/open-feature/flagd/compare/core/v0.11.1...core/v0.11.2) (2025-02-21)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update golang.org/x/exp digest to 939b2ce ([#1555](https://github.com/open-feature/flagd/issues/1555)) ([23afa9c](https://github.com/open-feature/flagd/commit/23afa9c18c27885bdae0f5c4ebdc30e780e9da71))
|
||||
* **deps:** update golang.org/x/exp digest to f9890c6 ([#1551](https://github.com/open-feature/flagd/issues/1551)) ([02c4b42](https://github.com/open-feature/flagd/commit/02c4b4250131ca819c85dcf10c2d78e0c218469f))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.5-20250127221518-be6d1143b690.1 ([#1549](https://github.com/open-feature/flagd/issues/1549)) ([d3eb44e](https://github.com/open-feature/flagd/commit/d3eb44ed45a54bd9152b7477cce17be90016683c))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.4 ([#1556](https://github.com/open-feature/flagd/issues/1556)) ([0dfa799](https://github.com/open-feature/flagd/commit/0dfa79956695849f3a703554525759093931a01d))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.0 ([#1568](https://github.com/open-feature/flagd/issues/1568)) ([a3d4162](https://github.com/open-feature/flagd/commit/a3d41625a2b79452c0732af29d0b4f320e74fe8b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.33.0 ([#1552](https://github.com/open-feature/flagd/issues/1552)) ([7cef153](https://github.com/open-feature/flagd/commit/7cef153a275a4fac5099f5a52013dcd227a79bb3))
|
||||
* **deps:** update module golang.org/x/mod to v0.23.0 ([#1544](https://github.com/open-feature/flagd/issues/1544)) ([6fe7bd2](https://github.com/open-feature/flagd/commit/6fe7bd2a3e82dfc81068d9d95d8c3a4acc16456c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* Adding gRPC dial option override to grpc_sync.go ([#1563](https://github.com/open-feature/flagd/issues/1563)) ([1a97ca5](https://github.com/open-feature/flagd/commit/1a97ca5f81582e6d1f139a61e0e49007ad173d3f))
|
||||
|
||||
## [0.11.1](https://github.com/open-feature/flagd/compare/core/v0.11.0...core/v0.11.1) (2025-02-04)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module golang.org/x/sync to v0.11.0 ([#1543](https://github.com/open-feature/flagd/issues/1543)) ([7d6c0dc](https://github.com/open-feature/flagd/commit/7d6c0dc6e6e6955af1e5225807deeb2b6797900b))
|
||||
|
||||
## [0.11.0](https://github.com/open-feature/flagd/compare/core/v0.10.8...core/v0.11.0) (2025-01-31)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540))
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to bb76343 ([#1534](https://github.com/open-feature/flagd/issues/1534)) ([8303353](https://github.com/open-feature/flagd/commit/8303353a1b503ef34b8e46d9bf77ce53c067ef3b))
|
||||
* **deps:** update golang.org/x/exp digest to 3edf0e9 ([#1538](https://github.com/open-feature/flagd/issues/1538)) ([7a06567](https://github.com/open-feature/flagd/commit/7a0656713a8c2ac3d456a3a300fe137debee0edd))
|
||||
* **deps:** update golang.org/x/exp digest to e0ece0d ([#1539](https://github.com/open-feature/flagd/issues/1539)) ([4281c6e](https://github.com/open-feature/flagd/commit/4281c6e80b233a162436fea3640bf5d061d40b96))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20250127221518-be6d1143b690.2 ([#1536](https://github.com/open-feature/flagd/issues/1536)) ([e23060f](https://github.com/open-feature/flagd/commit/e23060f24b2a714ae748e6b37d0d06b7caa1c95c))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20241220192239-696330adaff0.1 ([#1529](https://github.com/open-feature/flagd/issues/1529)) ([8881a80](https://github.com/open-feature/flagd/commit/8881a804b4055da0127a16b8fc57022d24906e1b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20250127221518-be6d1143b690.1 ([#1537](https://github.com/open-feature/flagd/issues/1537)) ([f74207b](https://github.com/open-feature/flagd/commit/f74207bc13b75bae4275bc486df51e2da569dd41))
|
||||
* **deps:** update module google.golang.org/grpc to v1.70.0 ([#1528](https://github.com/open-feature/flagd/issues/1528)) ([79b2b0a](https://github.com/open-feature/flagd/commit/79b2b0a6bbd48676dcbdd2393feb8247529bf29c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540)) ([b49abf9](https://github.com/open-feature/flagd/commit/b49abf95069da93bdf8369c8aa0ae40e698df760))
|
||||
* support yaml in blob, file, and http syncs ([#1522](https://github.com/open-feature/flagd/issues/1522)) ([76d673a](https://github.com/open-feature/flagd/commit/76d673ae8f765512270e6498569c0ce3d54a60bf))
|
||||
|
||||
## [0.10.8](https://github.com/open-feature/flagd/compare/core/v0.10.7...core/v0.10.8) (2025-01-19)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.3 ([#1520](https://github.com/open-feature/flagd/issues/1520)) ([db2f990](https://github.com/open-feature/flagd/commit/db2f99021dfd676d2fd0c6af6af7e77783ee31ce))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1524](https://github.com/open-feature/flagd/issues/1524)) ([eeae9a6](https://github.com/open-feature/flagd/commit/eeae9a64caf93356fd663cc735cc422edcf9e132))
|
||||
|
||||
## [0.10.7](https://github.com/open-feature/flagd/compare/core/v0.10.6...core/v0.10.7) (2025-01-16)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.3-20241220192239-696330adaff0.1 ([#1513](https://github.com/open-feature/flagd/issues/1513)) ([64c5787](https://github.com/open-feature/flagd/commit/64c57875b032edcef2e2d230e7735990e01b72b8))
|
||||
|
||||
## [0.10.6](https://github.com/open-feature/flagd/compare/core/v0.10.5...core/v0.10.6) (2025-01-15)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to 37baa2c ([#1499](https://github.com/open-feature/flagd/issues/1499)) ([1a853f7](https://github.com/open-feature/flagd/commit/1a853f79dc41523fd6dcb1ae6ca9745947955cbc))
|
||||
* **deps:** update github.com/open-feature/flagd-schemas digest to b81a56e ([#1391](https://github.com/open-feature/flagd/issues/1391)) ([6a3d8ac](https://github.com/open-feature/flagd/commit/6a3d8ac2511c32bd0dc77bba0169679aa9bf6ca6))
|
||||
* **deps:** update golang.org/x/exp digest to 7588d65 ([#1495](https://github.com/open-feature/flagd/issues/1495)) ([242e594](https://github.com/open-feature/flagd/commit/242e59450c71c682b56e554830ea3003bdbf9622))
|
||||
* **deps:** update golang.org/x/exp digest to b2144cd ([#1320](https://github.com/open-feature/flagd/issues/1320)) ([a692b00](https://github.com/open-feature/flagd/commit/a692b009ae8e7dc928d0fd65236b404192c99562))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.1 ([#1489](https://github.com/open-feature/flagd/issues/1489)) ([53add83](https://github.com/open-feature/flagd/commit/53add83a491c6e00e0d9b1b64a9461e5973edca7))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.2 ([#1492](https://github.com/open-feature/flagd/issues/1492)) ([9f1d94a](https://github.com/open-feature/flagd/commit/9f1d94a42ac00ecf5fc58c07a76c350e2e4ec2f6))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20241220192239-696330adaff0.1 ([#1490](https://github.com/open-feature/flagd/issues/1490)) ([6edce72](https://github.com/open-feature/flagd/commit/6edce72e8cff01ea13cbd15d604b35ccc8337f50))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.2-20241220192239-696330adaff0.1 ([#1502](https://github.com/open-feature/flagd/issues/1502)) ([426c36e](https://github.com/open-feature/flagd/commit/426c36e838b9ded3a23f933e66e963c8110c0ddb))
|
||||
* **deps:** update module connectrpc.com/connect to v1.18.1 ([#1507](https://github.com/open-feature/flagd/issues/1507)) ([89d3259](https://github.com/open-feature/flagd/commit/89d32591db784458ce9b4cca36662ea502418bc5))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.0 ([#1496](https://github.com/open-feature/flagd/issues/1496)) ([e1fe149](https://github.com/open-feature/flagd/commit/e1fe1490fd1c26b9c566ff5ddef666c0fa74b2d5))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.7.1 ([#1509](https://github.com/open-feature/flagd/issues/1509)) ([9d06812](https://github.com/open-feature/flagd/commit/9d0681270f26bb91777fa2b8a792a4b0ccd07304))
|
||||
* **deps:** update module golang.org/x/crypto to v0.32.0 ([#1497](https://github.com/open-feature/flagd/issues/1497)) ([63a34d2](https://github.com/open-feature/flagd/commit/63a34d23aedcd798ff9f4cd47cdaddca35416423))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.2 ([#1484](https://github.com/open-feature/flagd/issues/1484)) ([6b40ad3](https://github.com/open-feature/flagd/commit/6b40ad34c83da4a3116e7cad4139a63a6c918097))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.4 ([#1510](https://github.com/open-feature/flagd/issues/1510)) ([76d6353](https://github.com/open-feature/flagd/commit/76d6353840ab8e7c93bdb0802eb1c49fc6fe1dc0))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1470](https://github.com/open-feature/flagd/issues/1470)) ([26b0b1a](https://github.com/open-feature/flagd/commit/26b0b1af8bc4b3a393c3453784b50f167f13f743))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add ssl support to sync service ([#1479](https://github.com/open-feature/flagd/issues/1479)) ([#1501](https://github.com/open-feature/flagd/issues/1501)) ([d50fcc8](https://github.com/open-feature/flagd/commit/d50fcc821c1ae043cb8cf77e464f7b738e2ff755))
|
||||
* support flag metadata ([#1476](https://github.com/open-feature/flagd/issues/1476)) ([13fbbad](https://github.com/open-feature/flagd/commit/13fbbad4d849b35884f429c0e74a71ece9cce2c9))
|
||||
|
||||
## [0.10.5](https://github.com/open-feature/flagd/compare/core/v0.10.4...core/v0.10.5) (2024-12-17)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update kubernetes packages to v0.31.2 ([#1430](https://github.com/open-feature/flagd/issues/1430)) ([0df8622](https://github.com/open-feature/flagd/commit/0df862215563545c33f518ab7a5ad42a19bf6adb))
|
||||
* **deps:** update kubernetes packages to v0.31.3 ([#1454](https://github.com/open-feature/flagd/issues/1454)) ([f56d7b0](https://github.com/open-feature/flagd/commit/f56d7b043c2d80ae4fe27e996c05a7cc1c2c1b28))
|
||||
* **deps:** update kubernetes packages to v0.31.4 ([#1461](https://github.com/open-feature/flagd/issues/1461)) ([431fbb4](https://github.com/open-feature/flagd/commit/431fbb49513bcdb21b09845f47c26e51e7e9f21b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.2-20240906125204-0a6a901b42e8.1 ([#1451](https://github.com/open-feature/flagd/issues/1451)) ([8c6d91d](https://github.com/open-feature/flagd/commit/8c6d91d538d226b10cb954c23409902e9d245cda))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20240906125204-0a6a901b42e8.1 ([#1475](https://github.com/open-feature/flagd/issues/1475)) ([0b11c6c](https://github.com/open-feature/flagd/commit/0b11c6cf612b244bda6bab119814647f3ce8de2e))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.6.0 ([#1460](https://github.com/open-feature/flagd/issues/1460)) ([dbc1da4](https://github.com/open-feature/flagd/commit/dbc1da4ba984c06972b57cf990d1d31c4b8323df))
|
||||
* **deps:** update module github.com/diegoholiveira/jsonlogic/v3 to v3.6.1 ([#1473](https://github.com/open-feature/flagd/issues/1473)) ([a3d899c](https://github.com/open-feature/flagd/commit/a3d899c5f8952181a6a987436e2255c2ab9176c5))
|
||||
* **deps:** update module github.com/fsnotify/fsnotify to v1.8.0 ([#1438](https://github.com/open-feature/flagd/issues/1438)) ([949c73b](https://github.com/open-feature/flagd/commit/949c73bd6ebadb30cfa3b7573b43d722f8d2a93d))
|
||||
* **deps:** update module github.com/stretchr/testify to v1.10.0 ([#1455](https://github.com/open-feature/flagd/issues/1455)) ([8c843df](https://github.com/open-feature/flagd/commit/8c843df7714b1f2d120c5cac8e40c7220cc0c05b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.29.0 ([#1443](https://github.com/open-feature/flagd/issues/1443)) ([db96dd5](https://github.com/open-feature/flagd/commit/db96dd57b9de032fc4d15931bf907a7ed962f81b))
|
||||
* **deps:** update module golang.org/x/crypto to v0.30.0 ([#1457](https://github.com/open-feature/flagd/issues/1457)) ([dbdaa19](https://github.com/open-feature/flagd/commit/dbdaa199f0667f16d2a3b91867535ce93e63373c))
|
||||
* **deps:** update module golang.org/x/crypto to v0.31.0 ([#1463](https://github.com/open-feature/flagd/issues/1463)) ([b2245d7](https://github.com/open-feature/flagd/commit/b2245d7f73f1bde859b9627d337dd09ecd2f1a31))
|
||||
* **deps:** update module golang.org/x/mod to v0.22.0 ([#1444](https://github.com/open-feature/flagd/issues/1444)) ([ed064e1](https://github.com/open-feature/flagd/commit/ed064e134fb3a5edb0ec2d976f136af7e94d7f6d))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.0 ([#1442](https://github.com/open-feature/flagd/issues/1442)) ([cd27d09](https://github.com/open-feature/flagd/commit/cd27d098e6d8d8b0f681ef42d26dba1ebac67d12))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.1 ([#1456](https://github.com/open-feature/flagd/issues/1456)) ([0b6e2a1](https://github.com/open-feature/flagd/commit/0b6e2a1cd64910226d348c921b08a6de8013ac90))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.0 ([#1469](https://github.com/open-feature/flagd/issues/1469)) ([dd4869f](https://github.com/open-feature/flagd/commit/dd4869f5e095066f80c9d82d1be83155e7504d88))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1447](https://github.com/open-feature/flagd/issues/1447)) ([68b5794](https://github.com/open-feature/flagd/commit/68b5794180da84af9adc1f2cd80f929489969c1c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add context-value flag ([#1448](https://github.com/open-feature/flagd/issues/1448)) ([7ca092e](https://github.com/open-feature/flagd/commit/7ca092e478c937eca0c91357394499763545dc1c))
|
||||
* s3 support for the blob sync ([#1449](https://github.com/open-feature/flagd/issues/1449)) ([a9f7261](https://github.com/open-feature/flagd/commit/a9f7261e75bc064947ae14900e5c4edc4b49bec4))
|
||||
|
||||
## [0.10.4](https://github.com/open-feature/flagd/compare/core/v0.10.3...core/v0.10.4) (2024-10-28)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.1-20240906125204-0a6a901b42e8.1 ([#1420](https://github.com/open-feature/flagd/issues/1420)) ([1f06d5a](https://github.com/open-feature/flagd/commit/1f06d5a1837ea2b753974e96c2a1154d6cb3e582))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([#1425](https://github.com/open-feature/flagd/issues/1425)) ([583ba89](https://github.com/open-feature/flagd/commit/583ba894f2de794b36b6a1cc3bfceb9c46dc9d96))
|
||||
* **deps:** update module go.uber.org/mock to v0.5.0 ([#1427](https://github.com/open-feature/flagd/issues/1427)) ([0c6fd7f](https://github.com/open-feature/flagd/commit/0c6fd7fa688db992d4e58a202889cbfea07eebf6))
|
||||
* **deps:** update module gocloud.dev to v0.40.0 ([#1422](https://github.com/open-feature/flagd/issues/1422)) ([e0e4709](https://github.com/open-feature/flagd/commit/e0e4709243d8301bcbb0aaaa309be66944c1d9ed))
|
||||
* **deps:** update module golang.org/x/crypto to v0.28.0 ([#1416](https://github.com/open-feature/flagd/issues/1416)) ([fb272da](https://github.com/open-feature/flagd/commit/fb272da56e0eba12245309899888c18920b9a200))
|
||||
* **deps:** update module google.golang.org/grpc to v1.67.1 ([#1415](https://github.com/open-feature/flagd/issues/1415)) ([85a3a6b](https://github.com/open-feature/flagd/commit/85a3a6b46233fcc7cf71a0292b46c82ac8e66d7b))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* added custom grpc resolver ([#1424](https://github.com/open-feature/flagd/issues/1424)) ([e5007e2](https://github.com/open-feature/flagd/commit/e5007e2bcb6f049a3c54e09331065bb9abe215be))
|
||||
* support azure blob sync ([#1428](https://github.com/open-feature/flagd/issues/1428)) ([5c39cfe](https://github.com/open-feature/flagd/commit/5c39cfe30a3dead4f6db2c6f9ee4c12193cd479b))
|
||||
|
||||
## [0.10.3](https://github.com/open-feature/flagd/compare/core/v0.10.2...core/v0.10.3) (2024-09-23)
|
||||
|
||||
|
||||
|
|
193
core/go.mod
193
core/go.mod
|
@ -1,167 +1,126 @@
|
|||
module github.com/open-feature/flagd/core
|
||||
|
||||
go 1.24.0
|
||||
go 1.22.0
|
||||
|
||||
toolchain go1.24.4
|
||||
toolchain go1.22.7
|
||||
|
||||
require (
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20250529171031-ebdc14163473.2
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.36.6-20250529171031-ebdc14163473.1
|
||||
connectrpc.com/connect v1.18.1
|
||||
connectrpc.com/otelconnect v0.7.2
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.8.4
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250707123415-08b4c52d3b86
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.45
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20240906125204-0a6a901b42e8.1
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240906125204-0a6a901b42e8.2
|
||||
connectrpc.com/connect v1.17.0
|
||||
connectrpc.com/otelconnect v0.7.1
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.5.3
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20240708163558-2aa89b314322
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.44
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/robfig/cron v1.2.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/twmb/murmur3 v1.1.8
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
github.com/zeebo/xxh3 v1.0.2
|
||||
go.opentelemetry.io/otel v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0
|
||||
go.opentelemetry.io/otel/metric v1.37.0
|
||||
go.opentelemetry.io/otel/sdk v1.37.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0
|
||||
go.opentelemetry.io/otel/trace v1.37.0
|
||||
go.uber.org/mock v0.5.2
|
||||
go.opentelemetry.io/otel v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.52.0
|
||||
go.opentelemetry.io/otel/metric v1.30.0
|
||||
go.opentelemetry.io/otel/sdk v1.30.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.30.0
|
||||
go.opentelemetry.io/otel/trace v1.30.0
|
||||
go.uber.org/mock v0.4.0
|
||||
go.uber.org/zap v1.27.0
|
||||
gocloud.dev v0.42.0
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
|
||||
golang.org/x/mod v0.25.0
|
||||
golang.org/x/sync v0.15.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
gocloud.dev v0.39.0
|
||||
golang.org/x/crypto v0.27.0
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/mod v0.21.0
|
||||
golang.org/x/sync v0.8.0
|
||||
google.golang.org/grpc v1.67.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/apimachinery v0.33.2
|
||||
k8s.io/client-go v0.33.2
|
||||
k8s.io/apimachinery v0.31.1
|
||||
k8s.io/client-go v0.31.1
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.23.0 // indirect
|
||||
cloud.google.com/go v0.121.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
cloud.google.com/go/storage v1.55.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.65 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
|
||||
github.com/aws/smithy-go v1.22.3 // indirect
|
||||
cloud.google.com/go v0.115.0 // indirect
|
||||
cloud.google.com/go/auth v0.8.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.5.0 // indirect
|
||||
cloud.google.com/go/iam v1.1.13 // indirect
|
||||
cloud.google.com/go/storage v1.43.0 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/google/wire v0.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.59.1 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
golang.org/x/net v0.29.0 // indirect
|
||||
golang.org/x/oauth2 v0.22.0 // indirect
|
||||
golang.org/x/sys v0.25.0 // indirect
|
||||
golang.org/x/term v0.24.0 // indirect
|
||||
golang.org/x/text v0.18.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.235.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/api v0.191.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/api v0.33.2 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.31.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.19.3 // indirect
|
||||
sigs.k8s.io/gateway-api v1.2.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.19.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
473
core/go.sum
473
core/go.sum
|
@ -1,153 +1,107 @@
|
|||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20250529171031-ebdc14163473.2 h1:TZ+7u106u7C7lgNctxG03ABliF46eLhcIZG5Mdo67/E=
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20250529171031-ebdc14163473.2/go.mod h1:4u0WLwfkLob3dC/F8qNctqhtiEv2Mlyi8YgCDDzgYDs=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.36.6-20250529171031-ebdc14163473.1 h1:LdC4xAuUaNdduzQr5VvhjsgrCfpW9IYxYsjyCF0ANs0=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.36.6-20250529171031-ebdc14163473.1/go.mod h1:cCQ49+ttXE2MZ/ciRNb0tCG+F3kj2ZVbP+0/psbhrLY=
|
||||
cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss=
|
||||
cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20240215170432-1e611e2999cc.1 h1:i30n9d9zBIbLOzwsH82w7g6EOv6xtPN/byveEH9B9KQ=
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20240215170432-1e611e2999cc.1/go.mod h1:aqQ1tVIXcuAslNu5a3xTSE9qvYEazL6pCZ/VLU7j1hc=
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20240906125204-0a6a901b42e8.1 h1:18ZObecoJfRbNQDeuW0PoBR829Mw8FrPrmWIbbaA5hs=
|
||||
buf.build/gen/go/open-feature/flagd/grpc/go v1.5.1-20240906125204-0a6a901b42e8.1/go.mod h1:WA65xyBj+VxPfJ3a+EqdZtWGeNdwqiaQO1sriHaNL1Y=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240215170432-1e611e2999cc.2 h1:f0Zk6mcwiIZNs5pRkK8hfaE1NynStc2Z4hEnripAKTo=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240215170432-1e611e2999cc.2/go.mod h1:Y4p3/wjytMsn6a+N0geaRNEilEqj2pS3qrPqRFO5cY0=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240906125204-0a6a901b42e8.2 h1:z/+GDTJLJJjpTon7CK/pxrbwwtK7YmllO7jSTvGh0QA=
|
||||
buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.34.2-20240906125204-0a6a901b42e8.2/go.mod h1:OA7of1NYh+7uttnef5AxaNjL7y1AdjLpZ4DD1a4vuZg=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw=
|
||||
cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
|
||||
cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU=
|
||||
cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0=
|
||||
cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
|
||||
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
||||
connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI=
|
||||
connectrpc.com/otelconnect v0.7.2/go.mod h1:JS7XUKfuJs2adhCnXhNHPHLz6oAaZniCJdSF00OZSew=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc=
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
|
||||
cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
|
||||
cloud.google.com/go/auth v0.8.1 h1:QZW9FjC5lZzN864p13YxvAtGUlQ+KgRL+8Sg45Z6vxo=
|
||||
cloud.google.com/go/auth v0.8.1/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||
cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
|
||||
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
|
||||
cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4=
|
||||
cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus=
|
||||
cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE=
|
||||
cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
connectrpc.com/connect v1.17.0 h1:W0ZqMhtVzn9Zhn2yATuUokDLO5N+gIuBWMOnsQrfmZk=
|
||||
connectrpc.com/connect v1.17.0/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
||||
connectrpc.com/otelconnect v0.7.1 h1:scO5pOb0i4yUE66CnNrHeK1x51yq0bE0ehPg6WvzXJY=
|
||||
connectrpc.com/otelconnect v0.7.1/go.mod h1:dh3bFgHBTb2bkqGCeVVOtHJreSns7uu9wwL2Tbz17ms=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
|
||||
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
|
||||
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.12 h1:Y/2a+jLPrPbHpFkpAAYkVEtJmxORlXoo5k2g1fa2sUo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.12/go.mod h1:xse1YTjmORlb/6fhkWi8qJh3cvZi4JoVNhc+NbJt4kI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.65 h1:q+nV2yYegofO/SUXruT+pn4KxkxmaQ++1B/QedcKBFM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.65/go.mod h1:4zyjAuGOdikpNYiSGpsGz8hLGmUzlY8pc8r9QQ/RXYQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 h1:6VFPH/Zi9xYFMJKPQOX5URYkQoXRWeJ7V/7Y6ZDYoms=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 h1:pdgODsAhGo4dvzC3JAG5Ce0PX8kWXrTZGx+jxADD+5E=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.2/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0 h1:90uX0veLKcdHVfvxhkWUQSCi5VabtwMLFutYiRke4oo=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
|
||||
github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
|
||||
github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC9OpLedkZ51DWl8=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
|
||||
github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
|
||||
github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.8.4 h1:IVVU/VLz2hn10ImbmibjiUkdVsSFIB1vfDaOVsaipH4=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.8.4/go.mod h1:OYRb6FSTVmMM+MNQ7ElmMsczyNSepw+OU4Z8emDSi4w=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.5.3 h1:CPyZQ3fOgiIDZ1yWzPGUpyht5tYTOnRoN913c0mkXZw=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.5.3/go.mod h1:3nnfWovrlZq2rTpucrJ2KMIS8TMf6IoFneofmeqk/qk=
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
|
||||
github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
|
||||
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
|
||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
|
@ -162,12 +116,10 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
|||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -180,8 +132,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
|||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
|
@ -189,8 +141,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo=
|
||||
github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI=
|
||||
github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
|
||||
|
@ -200,36 +152,34 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
|
||||
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
|
||||
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
|
||||
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
|
||||
github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
|
||||
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
|
@ -247,55 +197,44 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
|||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250319190911-9b0ee43ecc47 h1:c6nodciz/xeU0xiAcDQ5MBW34DnPoi5/lEgjV5kZeZA=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250319190911-9b0ee43ecc47/go.mod h1:WKtwo1eW9/K6D+4HfgTXWBqCDzpvMhDa5eRxW7R5B2U=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250707123415-08b4c52d3b86 h1:r3e+qs3QUdf4+lUi2ZZnSHgYkjeLIb5yu5jo+ypA8iw=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20250707123415-08b4c52d3b86/go.mod h1:WKtwo1eW9/K6D+4HfgTXWBqCDzpvMhDa5eRxW7R5B2U=
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.45 h1:URnUf22ZoAx7/W8ek8dXCBYgY8FmnFEuEOSDLROQafY=
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.45/go.mod h1:PYh/Hfyna1lZYZUeu/8LM0qh0ZgpH7kKEXRLYaaRhGs=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20240708163558-2aa89b314322 h1:5zbNHqcZAc9jlhSrC0onuVL2RPpvYcDaNvW2wOZBfUY=
|
||||
github.com/open-feature/flagd-schemas v0.2.9-0.20240708163558-2aa89b314322/go.mod h1:WKtwo1eW9/K6D+4HfgTXWBqCDzpvMhDa5eRxW7R5B2U=
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.44 h1:0r4Z+RnJltuHdRBv79NFgAckhna6/M3Wcec6gzNX5vI=
|
||||
github.com/open-feature/open-feature-operator/apis v0.2.44/go.mod h1:xB2uLzvUkbydieX7q6/NqannBz3bt/e5BS2DeOyyw4Q=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
|
||||
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
|
||||
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
|
||||
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
||||
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
|
@ -312,63 +251,55 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
|||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
|
||||
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
|
||||
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0 h1:HHf+wKS6o5++XZhS98wvILrLVgHxjA/AMjqHKes+uzo=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
|
||||
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
|
||||
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0=
|
||||
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
|
||||
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
|
||||
go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
|
||||
go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y=
|
||||
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
|
||||
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
gocloud.dev v0.42.0 h1:qzG+9ItUL3RPB62/Amugws28n+4vGZXEoJEAMfjutzw=
|
||||
gocloud.dev v0.42.0/go.mod h1:zkaYAapZfQisXOA4bzhsbA4ckiStGQ3Psvs9/OQ5dPM=
|
||||
gocloud.dev v0.39.0 h1:EYABYGhAalPUaMrbSKOr5lejxoxvXj99nE8XFtsDgds=
|
||||
gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
|
||||
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs=
|
||||
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
|
@ -378,8 +309,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
|
|||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -395,11 +326,11 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
|
||||
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
|
||||
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -409,8 +340,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -419,21 +350,20 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
|
||||
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
|
@ -441,10 +371,10 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -457,36 +387,36 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
|||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
|
||||
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk=
|
||||
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/api v0.235.0 h1:C3MkpQSRxS1Jy6AkzTGKKrpSCOd2WOGrezZ+icKSkKo=
|
||||
google.golang.org/api v0.235.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg=
|
||||
google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk=
|
||||
google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM=
|
||||
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
|
||||
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -496,8 +426,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -513,30 +443,25 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY=
|
||||
k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs=
|
||||
k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
|
||||
k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
|
||||
k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY=
|
||||
k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E=
|
||||
k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo=
|
||||
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
|
||||
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
|
||||
k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
|
||||
k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
|
||||
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
|
||||
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
|
||||
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw=
|
||||
sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM=
|
||||
sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM=
|
||||
sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks=
|
||||
k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
|
||||
sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -11,8 +11,6 @@ import (
|
|||
)
|
||||
|
||||
func TestFractionalEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
commonFlags := Flags{
|
||||
|
@ -460,13 +458,8 @@ func TestFractionalEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
@ -493,8 +486,6 @@ func TestFractionalEvaluation(t *testing.T) {
|
|||
}
|
||||
|
||||
func BenchmarkFractionalEvaluation(b *testing.B) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
flags := Flags{
|
||||
|
@ -517,7 +508,7 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
},
|
||||
{
|
||||
"fractional": [
|
||||
{"var": "email"},
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
|
@ -551,41 +542,41 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
expectedReason string
|
||||
expectedErrorCode string
|
||||
}{
|
||||
"test_a@faas.com": {
|
||||
"test@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_a@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_b@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_b@faas.com",
|
||||
"email": "test@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_c@faas.com": {
|
||||
"test2@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_c@faas.com",
|
||||
"email": "test2@faas.com",
|
||||
},
|
||||
expectedVariant: "green",
|
||||
expectedValue: "#00FF00",
|
||||
expectedVariant: "yellow",
|
||||
expectedValue: "#FFFF00",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test_d@faas.com": {
|
||||
"test3@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test_d@faas.com",
|
||||
"email": "test3@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test4@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
|
@ -596,13 +587,7 @@ func BenchmarkFractionalEvaluation(b *testing.B) {
|
|||
for name, tt := range tests {
|
||||
b.Run(name, func(b *testing.B) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
b.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
|
||||
je := NewJSON(log, &store.Flags{Flags: tt.flags.Flags})
|
||||
for i := 0; i < b.N; i++ {
|
||||
value, variant, reason, _, err := resolve[string](
|
||||
ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
|
|
@ -3,7 +3,6 @@ package evaluator
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
)
|
||||
|
||||
|
@ -12,12 +11,12 @@ type AnyValue struct {
|
|||
Variant string
|
||||
Reason string
|
||||
FlagKey string
|
||||
Metadata model.Metadata
|
||||
Metadata map[string]interface{}
|
||||
Error error
|
||||
}
|
||||
|
||||
func NewAnyValue(
|
||||
value interface{}, variant string, reason string, flagKey string, metadata model.Metadata,
|
||||
value interface{}, variant string, reason string, flagKey string, metadata map[string]interface{},
|
||||
err error,
|
||||
) AnyValue {
|
||||
return AnyValue{
|
||||
|
@ -45,31 +44,31 @@ type IResolver interface {
|
|||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (value bool, variant string, reason string, metadata model.Metadata, err error)
|
||||
context map[string]any) (value bool, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveStringValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value string, variant string, reason string, metadata model.Metadata, err error)
|
||||
value string, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveIntValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value int64, variant string, reason string, metadata model.Metadata, err error)
|
||||
value int64, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveFloatValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value float64, variant string, reason string, metadata model.Metadata, err error)
|
||||
value float64, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveObjectValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
flagKey string,
|
||||
context map[string]any) (
|
||||
value map[string]any, variant string, reason string, metadata model.Metadata, err error)
|
||||
value map[string]any, variant string, reason string, metadata map[string]interface{}, err error)
|
||||
ResolveAsAnyValue(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
|
@ -78,5 +77,5 @@ type IResolver interface {
|
|||
ResolveAllValues(
|
||||
ctx context.Context,
|
||||
reqID string,
|
||||
context map[string]any) (resolutions []AnyValue, metadata model.Metadata, err error)
|
||||
context map[string]any) (values []AnyValue, err error)
|
||||
}
|
||||
|
|
|
@ -64,13 +64,13 @@ func WithEvaluator(name string, evalFunc func(interface{}, interface{}) interfac
|
|||
|
||||
// JSON evaluator
|
||||
type JSON struct {
|
||||
store *store.Store
|
||||
store *store.Flags
|
||||
Logger *logger.Logger
|
||||
jsonEvalTracer trace.Tracer
|
||||
Resolver
|
||||
}
|
||||
|
||||
func NewJSON(logger *logger.Logger, s *store.Store, opts ...JSONEvaluatorOption) *JSON {
|
||||
func NewJSON(logger *logger.Logger, s *store.Flags, opts ...JSONEvaluatorOption) *JSON {
|
||||
logger = logger.WithFields(
|
||||
zap.String("component", "evaluator"),
|
||||
zap.String("evaluator", "json"),
|
||||
|
@ -103,12 +103,13 @@ func (je *JSON) SetState(payload sync.DataSync) (map[string]interface{}, bool, e
|
|||
_, span := je.jsonEvalTracer.Start(
|
||||
context.Background(),
|
||||
"flagSync",
|
||||
trace.WithAttributes(attribute.String("feature_flag.source", payload.Source)))
|
||||
trace.WithAttributes(attribute.String("feature_flag.source", payload.Source)),
|
||||
trace.WithAttributes(attribute.String("feature_flag.sync_type", payload.Type.String())))
|
||||
defer span.End()
|
||||
|
||||
var definition Definition
|
||||
var newFlags Flags
|
||||
|
||||
err := configToFlagDefinition(je.Logger, payload.FlagData, &definition)
|
||||
err := configToFlags(je.Logger, payload.FlagData, &newFlags)
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, "flagSync error")
|
||||
span.RecordError(err)
|
||||
|
@ -118,7 +119,18 @@ func (je *JSON) SetState(payload sync.DataSync) (map[string]interface{}, bool, e
|
|||
var events map[string]interface{}
|
||||
var reSync bool
|
||||
|
||||
events, reSync = je.store.Update(payload.Source, definition.Flags, definition.Metadata)
|
||||
switch payload.Type {
|
||||
case sync.ALL:
|
||||
events, reSync = je.store.Merge(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.ADD:
|
||||
events = je.store.Add(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.UPDATE:
|
||||
events = je.store.Update(je.Logger, payload.Source, payload.Selector, newFlags.Flags)
|
||||
case sync.DELETE:
|
||||
events = je.store.DeleteFlags(je.Logger, payload.Source, newFlags.Flags)
|
||||
default:
|
||||
return nil, false, fmt.Errorf("unsupported sync type: %d", payload.Type)
|
||||
}
|
||||
|
||||
// Number of events correlates to the number of flags changed through this sync, record it
|
||||
span.SetAttributes(attribute.Int("feature_flag.change_count", len(events)))
|
||||
|
@ -139,24 +151,19 @@ func NewResolver(store store.IStore, logger *logger.Logger, jsonEvalTracer trace
|
|||
jsonlogic.AddOperator(StartsWithEvaluationName, NewStringComparisonEvaluator(logger).StartsWithEvaluation)
|
||||
jsonlogic.AddOperator(EndsWithEvaluationName, NewStringComparisonEvaluator(logger).EndsWithEvaluation)
|
||||
jsonlogic.AddOperator(SemVerEvaluationName, NewSemVerComparison(logger).SemVerEvaluation)
|
||||
jsonlogic.AddOperator(LegacyFractionEvaluationName, NewLegacyFractional(logger).LegacyFractionalEvaluation)
|
||||
|
||||
return Resolver{store: store, Logger: logger, tracer: jsonEvalTracer}
|
||||
}
|
||||
|
||||
func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]AnyValue,
|
||||
model.Metadata, error,
|
||||
) {
|
||||
func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]AnyValue, error) {
|
||||
_, span := je.tracer.Start(ctx, "resolveAll")
|
||||
defer span.End()
|
||||
|
||||
var selector store.Selector
|
||||
s := ctx.Value(store.SelectorContextKey{})
|
||||
if s != nil {
|
||||
selector = s.(store.Selector)
|
||||
}
|
||||
allFlags, flagSetMetadata, err := je.store.GetAll(ctx, &selector)
|
||||
var err error
|
||||
allFlags, err := je.store.GetAll(ctx)
|
||||
if err != nil {
|
||||
return nil, flagSetMetadata, fmt.Errorf("error retreiving flags from the store: %w", err)
|
||||
return nil, fmt.Errorf("error retreiving flags from the store: %w", err)
|
||||
}
|
||||
|
||||
values := []AnyValue{}
|
||||
|
@ -188,7 +195,7 @@ func (je *Resolver) ResolveAllValues(ctx context.Context, reqID string, context
|
|||
values = append(values, NewAnyValue(value, variant, reason, flagKey, metadata, err))
|
||||
}
|
||||
|
||||
return values, flagSetMetadata, nil
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (je *Resolver) ResolveBooleanValue(
|
||||
|
@ -305,24 +312,19 @@ func resolve[T constraints](ctx context.Context, reqID string, key string, conte
|
|||
func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey string, evalCtx map[string]any) (
|
||||
variant string, variants map[string]interface{}, reason string, metadata map[string]interface{}, err error,
|
||||
) {
|
||||
metadata = map[string]interface{}{}
|
||||
|
||||
var selector store.Selector
|
||||
s := ctx.Value(store.SelectorContextKey{})
|
||||
if s != nil {
|
||||
selector = s.(store.Selector)
|
||||
}
|
||||
flag, metadata, err := je.store.Get(ctx, flagKey, &selector)
|
||||
if err != nil {
|
||||
flag, ok := je.store.Get(ctx, flagKey)
|
||||
if !ok {
|
||||
// flag not found
|
||||
je.Logger.DebugWithID(reqID, fmt.Sprintf("requested flag could not be found: %s", flagKey))
|
||||
return "", map[string]interface{}{}, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
for key, value := range flag.Metadata {
|
||||
// If value is not nil or empty, copy to metadata
|
||||
if value != nil {
|
||||
metadata[key] = value
|
||||
}
|
||||
// add selector to evaluation metadata
|
||||
selector := je.store.SelectorForFlag(ctx, flag)
|
||||
if selector != "" {
|
||||
metadata[SelectorMetadataKey] = selector
|
||||
}
|
||||
|
||||
if flag.State == Disabled {
|
||||
|
@ -362,12 +364,7 @@ func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey s
|
|||
|
||||
// check if string is "null" before we strip quotes, so we can differentiate between JSON null and "null"
|
||||
trimmed := strings.TrimSpace(result.String())
|
||||
|
||||
if trimmed == "null" {
|
||||
if flag.DefaultVariant == "" {
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
return flag.DefaultVariant, flag.Variants, model.DefaultReason, metadata, nil
|
||||
}
|
||||
|
||||
|
@ -380,13 +377,8 @@ func (je *Resolver) evaluateVariant(ctx context.Context, reqID string, flagKey s
|
|||
}
|
||||
je.Logger.ErrorWithID(reqID,
|
||||
fmt.Sprintf("invalid or missing variant: %s for flagKey: %s, variant is not valid", variant, flagKey))
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.GeneralErrorCode)
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.ParseErrorCode)
|
||||
}
|
||||
|
||||
if flag.DefaultVariant == "" {
|
||||
return "", flag.Variants, model.ErrorReason, metadata, errors.New(model.FlagNotFoundErrorCode)
|
||||
}
|
||||
|
||||
return flag.DefaultVariant, flag.Variants, model.StaticReason, metadata, nil
|
||||
}
|
||||
|
||||
|
@ -448,8 +440,8 @@ func loadAndCompileSchema(log *logger.Logger) *gojsonschema.Schema {
|
|||
return compiledSchema
|
||||
}
|
||||
|
||||
// configToFlagDefinition convert string configurations to flags and store them to pointer newFlags
|
||||
func configToFlagDefinition(log *logger.Logger, config string, definition *Definition) error {
|
||||
// configToFlags convert string configurations to flags and store them to pointer newFlags
|
||||
func configToFlags(log *logger.Logger, config string, newFlags *Flags) error {
|
||||
compiledSchema := loadAndCompileSchema(log)
|
||||
|
||||
flagStringLoader := gojsonschema.NewStringLoader(config)
|
||||
|
@ -468,22 +460,17 @@ func configToFlagDefinition(log *logger.Logger, config string, definition *Defin
|
|||
return fmt.Errorf("transposing evaluators: %w", err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(transposedConfig), &definition)
|
||||
err = json.Unmarshal([]byte(transposedConfig), &newFlags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshalling provided configurations: %w", err)
|
||||
}
|
||||
|
||||
return validateDefaultVariants(definition)
|
||||
return validateDefaultVariants(newFlags)
|
||||
}
|
||||
|
||||
// validateDefaultVariants returns an error if any of the default variants aren't valid
|
||||
func validateDefaultVariants(flags *Definition) error {
|
||||
func validateDefaultVariants(flags *Flags) error {
|
||||
for name, flag := range flags.Flags {
|
||||
// Default Variant is not provided in the config
|
||||
if flag.DefaultVariant == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := flag.Variants[flag.DefaultVariant]; !ok {
|
||||
return fmt.Errorf(
|
||||
"default variant: '%s' isn't a valid variant of flag: '%s'", flag.DefaultVariant, name,
|
||||
|
|
|
@ -10,11 +10,6 @@ type Evaluators struct {
|
|||
Evaluators map[string]json.RawMessage `json:"$evaluators"`
|
||||
}
|
||||
|
||||
type Definition struct {
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
Metadata map[string]interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
type Flags struct {
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
}
|
||||
|
|
|
@ -44,90 +44,7 @@ const ValidFlags = `{
|
|||
}
|
||||
}`
|
||||
|
||||
const NullDefault = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": null
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const UndefinedDefault = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const NullDefaultWithTargetting = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": null,
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"==": [
|
||||
{
|
||||
"var": [
|
||||
"key"
|
||||
]
|
||||
},
|
||||
"value"
|
||||
]
|
||||
},
|
||||
"on"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const UndefinedDefaultWithTargetting = `{
|
||||
"flags": {
|
||||
"validFlag": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"==": [
|
||||
{
|
||||
"var": [
|
||||
"key"
|
||||
]
|
||||
},
|
||||
"value"
|
||||
]
|
||||
},
|
||||
"on"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
const (
|
||||
FlagSetID = "testSetId"
|
||||
Version = "v33"
|
||||
ValidFlag = "validFlag"
|
||||
MissingFlag = "missingFlag"
|
||||
StaticBoolFlag = "staticBoolFlag"
|
||||
StaticBoolValue = true
|
||||
|
@ -152,15 +69,9 @@ const (
|
|||
ColorProp = "color"
|
||||
ColorValue = "yellow"
|
||||
DisabledFlag = "disabledFlag"
|
||||
MetadataFlag = "metadataFlag"
|
||||
VersionOverride = "v66"
|
||||
)
|
||||
|
||||
var Flags = fmt.Sprintf(`{
|
||||
"metadata": {
|
||||
"flagSetId": "%s",
|
||||
"version": "%s"
|
||||
},
|
||||
"flags": {
|
||||
"%s": {
|
||||
"state": "ENABLED",
|
||||
|
@ -331,22 +242,9 @@ var Flags = fmt.Sprintf(`{
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "on"
|
||||
},
|
||||
"%s": {
|
||||
"state": "ENABLED",
|
||||
"variants": {
|
||||
"on": true,
|
||||
"off": false
|
||||
},
|
||||
"defaultVariant": "on",
|
||||
"metadata": {
|
||||
"version": "%s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
FlagSetID,
|
||||
Version,
|
||||
StaticBoolFlag,
|
||||
StaticBoolValue,
|
||||
StaticStringFlag,
|
||||
|
@ -377,13 +275,11 @@ var Flags = fmt.Sprintf(`{
|
|||
DynamicObjectValue,
|
||||
ColorProp,
|
||||
ColorValue,
|
||||
DisabledFlag,
|
||||
MetadataFlag,
|
||||
VersionOverride)
|
||||
DisabledFlag)
|
||||
|
||||
func TestGetState_Valid_ContainsFlag(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error")
|
||||
}
|
||||
|
@ -405,9 +301,9 @@ func TestSetState_Invalid_Error(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
// set state with an invalid flag definition
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: InvalidFlags, Source: "testSource"})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error")
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: InvalidFlags})
|
||||
if err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -415,7 +311,7 @@ func TestSetState_Valid_NoError(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
// set state with a valid flag definition
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: ValidFlags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -423,7 +319,7 @@ func TestSetState_Valid_NoError(t *testing.T) {
|
|||
|
||||
func TestResolveAllValues(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -439,7 +335,7 @@ func TestResolveAllValues(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
for _, test := range tests {
|
||||
vals, _, err := evaluator.ResolveAllValues(context.TODO(), reqID, test.context)
|
||||
vals, err := evaluator.ResolveAllValues(context.TODO(), reqID, test.context)
|
||||
if err != nil {
|
||||
t.Error("error from resolver", err)
|
||||
}
|
||||
|
@ -492,7 +388,7 @@ func TestResolveBooleanValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -527,7 +423,7 @@ func BenchmarkResolveBooleanValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -567,7 +463,7 @@ func TestResolveStringValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -603,7 +499,7 @@ func BenchmarkResolveStringValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -643,7 +539,7 @@ func TestResolveFloatValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -679,7 +575,7 @@ func BenchmarkResolveFloatValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -719,7 +615,7 @@ func TestResolveIntValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -755,7 +651,7 @@ func BenchmarkResolveIntValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -795,7 +691,7 @@ func TestResolveObjectValue(t *testing.T) {
|
|||
}
|
||||
const reqID = "default"
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -834,7 +730,7 @@ func BenchmarkResolveObjectValue(b *testing.B) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
b.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -879,7 +775,7 @@ func TestResolveAsAnyValue(t *testing.T) {
|
|||
}
|
||||
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: Flags})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
@ -898,37 +794,6 @@ func TestResolveAsAnyValue(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestResolve_DefaultVariant(t *testing.T) {
|
||||
tests := []struct {
|
||||
flags string
|
||||
flagKey string
|
||||
context map[string]interface{}
|
||||
reason string
|
||||
errorCode string
|
||||
}{
|
||||
{NullDefault, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{UndefinedDefault, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{NullDefaultWithTargetting, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
{UndefinedDefaultWithTargetting, ValidFlag, nil, model.ErrorReason, model.FlagNotFoundErrorCode},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: test.flags, Source: "testSource"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error")
|
||||
}
|
||||
|
||||
anyResult := evaluator.ResolveAsAnyValue(context.TODO(), "", test.flagKey, test.context)
|
||||
|
||||
assert.Equal(t, model.ErrorReason, anyResult.Reason)
|
||||
assert.EqualError(t, anyResult.Error, test.errorCode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetState_DefaultVariantValidation(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
jsonFlags string
|
||||
|
@ -982,7 +847,7 @@ func TestSetState_DefaultVariantValidation(t *testing.T) {
|
|||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.jsonFlags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.jsonFlags})
|
||||
|
||||
if tt.valid && err != nil {
|
||||
t.Error(err)
|
||||
|
@ -994,6 +859,7 @@ func TestSetState_DefaultVariantValidation(t *testing.T) {
|
|||
func TestState_Evaluator(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
inputState string
|
||||
inputSyncType sync.Type
|
||||
expectedOutputState string
|
||||
expectedError bool
|
||||
expectedResync bool
|
||||
|
@ -1029,6 +895,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedOutputState: `
|
||||
{
|
||||
"flags": {
|
||||
|
@ -1041,7 +908,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1089,6 +956,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedOutputState: `
|
||||
{
|
||||
"flags": {
|
||||
|
@ -1101,7 +969,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1145,6 +1013,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: true,
|
||||
},
|
||||
"invalid targeting": {
|
||||
|
@ -1177,7 +1046,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "off",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
|
@ -1198,6 +1067,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"flagSources":null
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: false,
|
||||
expectedOutputState: `
|
||||
{
|
||||
|
@ -1211,7 +1081,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1230,7 +1100,7 @@ func TestState_Evaluator(t *testing.T) {
|
|||
"off": false
|
||||
},
|
||||
"defaultVariant": "off",
|
||||
"source":"testSource",
|
||||
"source":"",
|
||||
"selector":"",
|
||||
"targeting": {
|
||||
"if": [
|
||||
|
@ -1280,15 +1150,47 @@ func TestState_Evaluator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: sync.ALL,
|
||||
expectedError: true,
|
||||
},
|
||||
"unexpected sync type": {
|
||||
inputState: `
|
||||
{
|
||||
"flags": {
|
||||
"fibAlgo": {
|
||||
"variants": {
|
||||
"recursive": "recursive",
|
||||
"memo": "memo",
|
||||
"loop": "loop",
|
||||
"binet": "binet"
|
||||
},
|
||||
"defaultVariant": "recursive",
|
||||
"state": "ENABLED",
|
||||
"targeting": {
|
||||
"if": [
|
||||
{
|
||||
"$ref": "emailWithFaas"
|
||||
}, "binet", null
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"$evaluators": {
|
||||
"emailWithFaas": ""
|
||||
}
|
||||
}
|
||||
`,
|
||||
inputSyncType: 999,
|
||||
expectedError: true,
|
||||
expectedResync: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, resync, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.inputState, Source: "testSource"})
|
||||
_, resync, err := jsonEvaluator.SetState(sync.DataSync{FlagData: tt.inputState})
|
||||
if err != nil {
|
||||
if !tt.expectedError {
|
||||
t.Error(err)
|
||||
|
@ -1321,8 +1223,8 @@ func TestState_Evaluator(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedOutputJSON["flags"], gotOutputJSON) {
|
||||
t.Errorf("expected state: %v got state: %v", expectedOutputJSON["flags"], gotOutputJSON)
|
||||
if !reflect.DeepEqual(expectedOutputJSON["flags"], gotOutputJSON["flags"]) {
|
||||
t.Errorf("expected state: %v got state: %v", expectedOutputJSON, gotOutputJSON)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -1330,11 +1232,13 @@ func TestState_Evaluator(t *testing.T) {
|
|||
|
||||
func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
dataSyncType sync.Type
|
||||
flagResolution func(evaluator *evaluator.JSON) error
|
||||
}{
|
||||
"Add_ResolveAllValues": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1342,8 +1246,9 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Update_ResolveAllValues": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1351,8 +1256,9 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Delete_ResolveAllValues": {
|
||||
dataSyncType: sync.DELETE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
_, err := evaluator.ResolveAllValues(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1360,30 +1266,35 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"Add_ResolveBooleanValue": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveBooleanValue(context.TODO(), "", StaticBoolFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Update_ResolveStringValue": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveBooleanValue(context.TODO(), "", StaticStringValue, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Delete_ResolveIntValue": {
|
||||
dataSyncType: sync.DELETE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveIntValue(context.TODO(), "", StaticIntFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Add_ResolveFloatValue": {
|
||||
dataSyncType: sync.ADD,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveFloatValue(context.TODO(), "", StaticFloatFlag, nil)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"Update_ResolveObjectValue": {
|
||||
dataSyncType: sync.UPDATE,
|
||||
flagResolution: func(evaluator *evaluator.JSON) error {
|
||||
_, _, _, _, err := evaluator.ResolveObjectValue(context.TODO(), "", StaticObjectFlag, nil)
|
||||
return err
|
||||
|
@ -1395,7 +1306,7 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
t.Run(name, func(t *testing.T) {
|
||||
jsonEvaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Type: sync.ADD})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -1418,7 +1329,7 @@ func TestFlagStateSafeForConcurrentReadWrites(t *testing.T) {
|
|||
errChan <- nil
|
||||
return
|
||||
default:
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Source: "testSource"})
|
||||
_, _, err := jsonEvaluator.SetState(sync.DataSync{FlagData: Flags, Type: tt.dataSyncType})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
|
@ -1460,7 +1371,7 @@ func TestFlagdAmbientProperties(t *testing.T) {
|
|||
t.Run("flagKeyIsInTheContext", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"welcome-banner": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1500,7 +1411,7 @@ func TestFlagdAmbientProperties(t *testing.T) {
|
|||
t.Run("timestampIsInTheContext", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"welcome-banner": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1534,7 +1445,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
t.Run("missing variant error", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"missing-variant": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1562,7 +1473,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
t.Run("null fallback", func(t *testing.T) {
|
||||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"null-fallback": {
|
||||
"state": "ENABLED",
|
||||
|
@ -1595,7 +1506,7 @@ func TestTargetingVariantBehavior(t *testing.T) {
|
|||
evaluator := evaluator.NewJSON(logger.NewLogger(nil, false), store.NewFlags())
|
||||
|
||||
//nolint:dupword
|
||||
_, _, err := evaluator.SetState(sync.DataSync{Source: "testSource", FlagData: `{
|
||||
_, _, err := evaluator.SetState(sync.DataSync{FlagData: `{
|
||||
"flags": {
|
||||
"match-boolean": {
|
||||
"state": "ENABLED",
|
||||
|
|
|
@ -0,0 +1,145 @@
|
|||
// This evaluation type is deprecated and will be removed before v1.
|
||||
// Do not enhance it or use it for reference.
|
||||
|
||||
package evaluator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/zeebo/xxh3"
|
||||
)
|
||||
|
||||
const (
|
||||
LegacyFractionEvaluationName = "fractionalEvaluation"
|
||||
LegacyFractionEvaluationLink = "https://flagd.dev/concepts/#migrating-from-legacy-fractionalevaluation"
|
||||
)
|
||||
|
||||
// Deprecated: LegacyFractional is deprecated. This will be removed prior to v1 release.
|
||||
type LegacyFractional struct {
|
||||
Logger *logger.Logger
|
||||
}
|
||||
|
||||
type legacyFractionalEvaluationDistribution struct {
|
||||
variant string
|
||||
percentage int
|
||||
}
|
||||
|
||||
func NewLegacyFractional(logger *logger.Logger) *LegacyFractional {
|
||||
return &LegacyFractional{Logger: logger}
|
||||
}
|
||||
|
||||
func (fe *LegacyFractional) LegacyFractionalEvaluation(values, data interface{}) interface{} {
|
||||
fe.Logger.Warn(
|
||||
fmt.Sprintf("%s is deprecated, please use %s, see: %s",
|
||||
LegacyFractionEvaluationName,
|
||||
FractionEvaluationName,
|
||||
LegacyFractionEvaluationLink))
|
||||
|
||||
valueToDistribute, feDistributions, err := parseLegacyFractionalEvaluationData(values, data)
|
||||
if err != nil {
|
||||
fe.Logger.Error(fmt.Sprintf("parse fractional evaluation data: %v", err))
|
||||
return nil
|
||||
}
|
||||
|
||||
return distributeLegacyValue(valueToDistribute, feDistributions)
|
||||
}
|
||||
|
||||
func parseLegacyFractionalEvaluationData(values, data interface{}) (string,
|
||||
[]legacyFractionalEvaluationDistribution, error,
|
||||
) {
|
||||
valuesArray, ok := values.([]interface{})
|
||||
if !ok {
|
||||
return "", nil, errors.New("fractional evaluation data is not an array")
|
||||
}
|
||||
if len(valuesArray) < 2 {
|
||||
return "", nil, errors.New("fractional evaluation data has length under 2")
|
||||
}
|
||||
|
||||
bucketBy, ok := valuesArray[0].(string)
|
||||
if !ok {
|
||||
return "", nil, errors.New("first element of fractional evaluation data isn't of type string")
|
||||
}
|
||||
|
||||
dataMap, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", nil, errors.New("data isn't of type map[string]interface{}")
|
||||
}
|
||||
|
||||
v, ok := dataMap[bucketBy]
|
||||
if !ok {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
valueToDistribute, ok := v.(string)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("var: %s isn't of type string", bucketBy)
|
||||
}
|
||||
|
||||
feDistributions, err := parseLegacyFractionalEvaluationDistributions(valuesArray)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return valueToDistribute, feDistributions, nil
|
||||
}
|
||||
|
||||
func parseLegacyFractionalEvaluationDistributions(values []interface{}) (
|
||||
[]legacyFractionalEvaluationDistribution, error,
|
||||
) {
|
||||
sumOfPercentages := 0
|
||||
var feDistributions []legacyFractionalEvaluationDistribution
|
||||
for i := 1; i < len(values); i++ {
|
||||
distributionArray, ok := values[i].([]interface{})
|
||||
if !ok {
|
||||
return nil, errors.New("distribution elements aren't of type []interface{}")
|
||||
}
|
||||
|
||||
if len(distributionArray) != 2 {
|
||||
return nil, errors.New("distribution element isn't length 2")
|
||||
}
|
||||
|
||||
variant, ok := distributionArray[0].(string)
|
||||
if !ok {
|
||||
return nil, errors.New("first element of distribution element isn't string")
|
||||
}
|
||||
|
||||
percentage, ok := distributionArray[1].(float64)
|
||||
if !ok {
|
||||
return nil, errors.New("second element of distribution element isn't float")
|
||||
}
|
||||
|
||||
sumOfPercentages += int(percentage)
|
||||
|
||||
feDistributions = append(feDistributions, legacyFractionalEvaluationDistribution{
|
||||
variant: variant,
|
||||
percentage: int(percentage),
|
||||
})
|
||||
}
|
||||
|
||||
if sumOfPercentages != 100 {
|
||||
return nil, fmt.Errorf("percentages must sum to 100, got: %d", sumOfPercentages)
|
||||
}
|
||||
|
||||
return feDistributions, nil
|
||||
}
|
||||
|
||||
func distributeLegacyValue(value string, feDistribution []legacyFractionalEvaluationDistribution) string {
|
||||
hashValue := xxh3.HashString(value)
|
||||
|
||||
hashRatio := float64(hashValue) / math.Pow(2, 64) // divide the hash value by the largest possible value, integer 2^64
|
||||
|
||||
bucket := int(hashRatio * 100) // integer in range [0, 99]
|
||||
|
||||
rangeEnd := 0
|
||||
for _, dist := range feDistribution {
|
||||
rangeEnd += dist.percentage
|
||||
if bucket < rangeEnd {
|
||||
return dist.variant
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
|
@ -0,0 +1,300 @@
|
|||
package evaluator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/store"
|
||||
)
|
||||
|
||||
func TestLegacyFractionalEvaluation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
flags := Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"in": ["@faas.com", {
|
||||
"var": ["email"]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
],
|
||||
[
|
||||
"yellow",
|
||||
25
|
||||
]
|
||||
]
|
||||
}, null
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := map[string]struct {
|
||||
flags Flags
|
||||
flagKey string
|
||||
context map[string]any
|
||||
expectedValue string
|
||||
expectedVariant string
|
||||
expectedReason string
|
||||
expectedErrorCode string
|
||||
}{
|
||||
"test@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test2@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test2@faas.com",
|
||||
},
|
||||
expectedVariant: "yellow",
|
||||
expectedValue: "#FFFF00",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test3@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test3@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"test4@faas.com": {
|
||||
flags: flags,
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "blue",
|
||||
expectedValue: "#0000FF",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"non even split": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"in": ["@faas.com", {
|
||||
"var": ["email"]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
50
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
]
|
||||
]
|
||||
}, null
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "test4@faas.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"fallback to default variant if no email provided": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
],
|
||||
[
|
||||
"green",
|
||||
25
|
||||
],
|
||||
[
|
||||
"yellow",
|
||||
25
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{},
|
||||
expectedVariant: "",
|
||||
expectedValue: "",
|
||||
expectedReason: model.ErrorReason,
|
||||
expectedErrorCode: model.ParseErrorCode,
|
||||
},
|
||||
"fallback to default variant if invalid variant as result of fractional evaluation": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"black",
|
||||
100
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "foo@foo.com",
|
||||
},
|
||||
expectedVariant: "",
|
||||
expectedValue: "",
|
||||
expectedReason: model.ErrorReason,
|
||||
expectedErrorCode: model.ParseErrorCode,
|
||||
},
|
||||
"fallback to default variant if percentages don't sum to 100": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"fractionalEvaluation": [
|
||||
"email",
|
||||
[
|
||||
"red",
|
||||
25
|
||||
],
|
||||
[
|
||||
"blue",
|
||||
25
|
||||
]
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"email": "foo@foo.com",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.DefaultReason,
|
||||
},
|
||||
}
|
||||
const reqID = "default"
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
if value != tt.expectedValue {
|
||||
t.Errorf("expected value '%s', got '%s'", tt.expectedValue, value)
|
||||
}
|
||||
|
||||
if variant != tt.expectedVariant {
|
||||
t.Errorf("expected variant '%s', got '%s'", tt.expectedVariant, variant)
|
||||
}
|
||||
|
||||
if reason != tt.expectedReason {
|
||||
t.Errorf("expected reason '%s', got '%s'", tt.expectedReason, reason)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errorCode := err.Error()
|
||||
if errorCode != tt.expectedErrorCode {
|
||||
t.Errorf("expected err '%v', got '%v'", tt.expectedErrorCode, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -14,7 +14,6 @@ import (
|
|||
reflect "reflect"
|
||||
|
||||
evaluator "github.com/open-feature/flagd/core/pkg/evaluator"
|
||||
model "github.com/open-feature/flagd/core/pkg/model"
|
||||
sync "github.com/open-feature/flagd/core/pkg/sync"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
@ -23,7 +22,6 @@ import (
|
|||
type MockIEvaluator struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockIEvaluatorMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockIEvaluatorMockRecorder is the mock recorder for MockIEvaluator.
|
||||
|
@ -59,13 +57,12 @@ func (mr *MockIEvaluatorMockRecorder) GetState() *gomock.Call {
|
|||
}
|
||||
|
||||
// ResolveAllValues mocks base method.
|
||||
func (m *MockIEvaluator) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveAllValues", ctx, reqID, context)
|
||||
ret0, _ := ret[0].([]evaluator.AnyValue)
|
||||
ret1, _ := ret[1].(model.Metadata)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ResolveAllValues indicates an expected call of ResolveAllValues.
|
||||
|
@ -89,13 +86,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveAsAnyValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveBooleanValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveBooleanValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -107,13 +104,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveBooleanValue(ctx, reqID, flagKey, c
|
|||
}
|
||||
|
||||
// ResolveFloatValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveFloatValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(float64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -125,13 +122,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveFloatValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveIntValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveIntValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -143,13 +140,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveIntValue(ctx, reqID, flagKey, conte
|
|||
}
|
||||
|
||||
// ResolveObjectValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveObjectValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -161,13 +158,13 @@ func (mr *MockIEvaluatorMockRecorder) ResolveObjectValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// ResolveStringValue mocks base method.
|
||||
func (m *MockIEvaluator) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, model.Metadata, error) {
|
||||
func (m *MockIEvaluator) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveStringValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -179,10 +176,10 @@ func (mr *MockIEvaluatorMockRecorder) ResolveStringValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// SetState mocks base method.
|
||||
func (m *MockIEvaluator) SetState(payload sync.DataSync) (model.Metadata, bool, error) {
|
||||
func (m *MockIEvaluator) SetState(payload sync.DataSync) (map[string]any, bool, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SetState", payload)
|
||||
ret0, _ := ret[0].(model.Metadata)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(bool)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
|
@ -198,7 +195,6 @@ func (mr *MockIEvaluatorMockRecorder) SetState(payload any) *gomock.Call {
|
|||
type MockIResolver struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockIResolverMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockIResolverMockRecorder is the mock recorder for MockIResolver.
|
||||
|
@ -219,13 +215,12 @@ func (m *MockIResolver) EXPECT() *MockIResolverMockRecorder {
|
|||
}
|
||||
|
||||
// ResolveAllValues mocks base method.
|
||||
func (m *MockIResolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveAllValues(ctx context.Context, reqID string, context map[string]any) ([]evaluator.AnyValue, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveAllValues", ctx, reqID, context)
|
||||
ret0, _ := ret[0].([]evaluator.AnyValue)
|
||||
ret1, _ := ret[1].(model.Metadata)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ResolveAllValues indicates an expected call of ResolveAllValues.
|
||||
|
@ -249,13 +244,13 @@ func (mr *MockIResolverMockRecorder) ResolveAsAnyValue(ctx, reqID, flagKey, cont
|
|||
}
|
||||
|
||||
// ResolveBooleanValue mocks base method.
|
||||
func (m *MockIResolver) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveBooleanValue(ctx context.Context, reqID, flagKey string, context map[string]any) (bool, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveBooleanValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -267,13 +262,13 @@ func (mr *MockIResolverMockRecorder) ResolveBooleanValue(ctx, reqID, flagKey, co
|
|||
}
|
||||
|
||||
// ResolveFloatValue mocks base method.
|
||||
func (m *MockIResolver) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveFloatValue(ctx context.Context, reqID, flagKey string, context map[string]any) (float64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveFloatValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(float64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -285,13 +280,13 @@ func (mr *MockIResolverMockRecorder) ResolveFloatValue(ctx, reqID, flagKey, cont
|
|||
}
|
||||
|
||||
// ResolveIntValue mocks base method.
|
||||
func (m *MockIResolver) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveIntValue(ctx context.Context, reqID, flagKey string, context map[string]any) (int64, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveIntValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -303,13 +298,13 @@ func (mr *MockIResolverMockRecorder) ResolveIntValue(ctx, reqID, flagKey, contex
|
|||
}
|
||||
|
||||
// ResolveObjectValue mocks base method.
|
||||
func (m *MockIResolver) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveObjectValue(ctx context.Context, reqID, flagKey string, context map[string]any) (map[string]any, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveObjectValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(map[string]any)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
@ -321,13 +316,13 @@ func (mr *MockIResolverMockRecorder) ResolveObjectValue(ctx, reqID, flagKey, con
|
|||
}
|
||||
|
||||
// ResolveStringValue mocks base method.
|
||||
func (m *MockIResolver) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, model.Metadata, error) {
|
||||
func (m *MockIResolver) ResolveStringValue(ctx context.Context, reqID, flagKey string, context map[string]any) (string, string, string, map[string]any, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResolveStringValue", ctx, reqID, flagKey, context)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(string)
|
||||
ret3, _ := ret[3].(model.Metadata)
|
||||
ret3, _ := ret[3].(map[string]any)
|
||||
ret4, _ := ret[4].(error)
|
||||
return ret0, ret1, ret2, ret3, ret4
|
||||
}
|
||||
|
|
|
@ -102,7 +102,7 @@ func parseSemverEvaluationData(values interface{}) (string, string, SemVerOperat
|
|||
}
|
||||
|
||||
if len(parsed) != 3 {
|
||||
return "", "", "", errors.New("sem_ver evaluation must contain a value, an operator, and a comparison target")
|
||||
return "", "", "", errors.New("sem_ver evaluation must contain a value, an operator and a comparison target")
|
||||
}
|
||||
|
||||
actualVersion, err := parseSemanticVersion(parsed[0])
|
||||
|
@ -122,17 +122,11 @@ func parseSemverEvaluationData(values interface{}) (string, string, SemVerOperat
|
|||
return actualVersion, targetVersion, operator, nil
|
||||
}
|
||||
|
||||
func ensureString(v interface{}) string {
|
||||
if str, ok := v.(string); ok {
|
||||
// It's already a string
|
||||
return str
|
||||
}
|
||||
// Convert to string if not already
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
|
||||
func parseSemanticVersion(v interface{}) (string, error) {
|
||||
version := ensureString(v)
|
||||
version, ok := v.(string)
|
||||
if !ok {
|
||||
return "", errors.New("sem_ver evaluation: property did not resolve to a string value")
|
||||
}
|
||||
// version strings are only valid in the semver package if they start with a 'v'
|
||||
// if it's not present in the given value, we prepend it
|
||||
if !strings.HasPrefix(version, "v") {
|
||||
|
@ -140,7 +134,7 @@ func parseSemanticVersion(v interface{}) (string, error) {
|
|||
}
|
||||
|
||||
if !semver.IsValid(version) {
|
||||
return "", fmt.Errorf("'%v' is not a valid semantic version string", version)
|
||||
return "", errors.New("not a valid semantic version string")
|
||||
}
|
||||
|
||||
return version, nil
|
||||
|
@ -149,7 +143,7 @@ func parseSemanticVersion(v interface{}) (string, error) {
|
|||
func parseOperator(o interface{}) (SemVerOperator, error) {
|
||||
operatorString, ok := o.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not parse operator '%v'", o)
|
||||
return "", errors.New("could not parse operator")
|
||||
}
|
||||
|
||||
return SemVerOperator(operatorString), nil
|
||||
|
|
|
@ -23,76 +23,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "invalid version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "invalid",
|
||||
v2: "v1.0.0",
|
||||
},
|
||||
want: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "preview version vs non preview version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v1.0.0-preview.1.2",
|
||||
v2: "v1.0.0",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "preview version vs preview version",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v1.0.0-preview.1.3",
|
||||
v2: "v1.0.0-preview.1.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v left greater",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "0.0.1",
|
||||
v2: "v0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v right greater",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "v0.0.1",
|
||||
v2: "0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v right equals",
|
||||
svo: Equals,
|
||||
args: args{
|
||||
v1: "v0.0.1",
|
||||
v2: "0.0.1",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no prefixed v both",
|
||||
svo: Greater,
|
||||
args: args{
|
||||
v1: "0.0.1",
|
||||
v2: "0.0.2",
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid operator",
|
||||
svo: "",
|
||||
|
@ -103,16 +33,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "less with large number",
|
||||
svo: Less,
|
||||
args: args{
|
||||
v1: "v1234.0.1",
|
||||
v2: "v1235.0.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "less",
|
||||
svo: Less,
|
||||
|
@ -123,16 +43,6 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no minor version",
|
||||
svo: Less,
|
||||
args: args{
|
||||
v1: "v1.0",
|
||||
v2: "v1.2",
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "not less",
|
||||
svo: Less,
|
||||
|
@ -296,28 +206,19 @@ func TestSemVerOperator_Compare(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var operatorInterface interface{} = string(tt.svo)
|
||||
actualVersion, targetVersion, operator, err := parseSemverEvaluationData([]interface{}{tt.args.v1, operatorInterface, tt.args.v2})
|
||||
if err != nil {
|
||||
require.Truef(t, tt.wantErr, "Error parsing semver evaluation data. actualVersion: %s, targetVersion: %s, operator: %s, err: %s", actualVersion, targetVersion, operator, err)
|
||||
return
|
||||
}
|
||||
|
||||
got, err := operator.compare(actualVersion, targetVersion)
|
||||
got, err := tt.svo.compare(tt.args.v1, tt.args.v2)
|
||||
|
||||
if tt.wantErr {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.Nil(t, err)
|
||||
require.Equalf(t, tt.want, got, "compare(%v, %v) operator: %s", tt.args.v1, tt.args.v2, operator)
|
||||
require.Equalf(t, tt.want, got, "compare(%v, %v)", tt.args.v1, tt.args.v2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -484,130 +385,6 @@ func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
|||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions given as double - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1.2, "=", "1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions given as int - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1, "=", "v1.0.0"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions and minor-version without patch version operator provided - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [1.2, "=", "1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions with prefixed v operator provided - match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"headerColor": {
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "red",
|
||||
Variants: map[string]any{
|
||||
"red": "#FF0000",
|
||||
"blue": "#0000FF",
|
||||
"green": "#00FF00",
|
||||
"yellow": "#FFFF00",
|
||||
},
|
||||
Targeting: []byte(`{
|
||||
"if": [
|
||||
{
|
||||
"sem_ver": [{"var": "version"}, "<", "v1.2"]
|
||||
},
|
||||
"red", "green"
|
||||
]
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
flagKey: "headerColor",
|
||||
context: map[string]any{
|
||||
"version": "v1.0.0",
|
||||
},
|
||||
expectedVariant: "red",
|
||||
expectedValue: "#FF0000",
|
||||
expectedReason: model.TargetingMatchReason,
|
||||
},
|
||||
"versions and major-version operator provided - no match": {
|
||||
flags: Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
|
@ -924,12 +701,8 @@ func TestJSONEvaluator_semVerEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
|
|
@ -13,8 +13,6 @@ import (
|
|||
)
|
||||
|
||||
func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -187,12 +185,8 @@ func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
@ -216,8 +210,6 @@ func TestJSONEvaluator_startsWithEvaluation(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJSONEvaluator_endsWithEvaluation(t *testing.T) {
|
||||
const source = "testSource"
|
||||
var sources = []string{source}
|
||||
ctx := context.Background()
|
||||
|
||||
tests := map[string]struct {
|
||||
|
@ -390,12 +382,9 @@ func TestJSONEvaluator_endsWithEvaluation(t *testing.T) {
|
|||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
log := logger.NewLogger(nil, false)
|
||||
s, err := store.NewStore(log, sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
je := NewJSON(log, s)
|
||||
je.store.Update(source, tt.flags.Flags, model.Metadata{})
|
||||
je := NewJSON(log, store.NewFlags())
|
||||
|
||||
je.store.Flags = tt.flags.Flags
|
||||
|
||||
value, variant, reason, _, err := resolve[string](ctx, reqID, tt.flagKey, tt.context, je.evaluateVariant)
|
||||
|
||||
|
|
|
@ -2,26 +2,15 @@ package model
|
|||
|
||||
import "encoding/json"
|
||||
|
||||
const Key = "Key"
|
||||
const FlagSetId = "FlagSetId"
|
||||
const Source = "Source"
|
||||
const Priority = "Priority"
|
||||
|
||||
type Flag struct {
|
||||
Key string `json:"-"` // not serialized, used only for indexing
|
||||
FlagSetId string `json:"-"` // not serialized, used only for indexing
|
||||
Priority int `json:"-"` // not serialized, used only for indexing
|
||||
State string `json:"state"`
|
||||
DefaultVariant string `json:"defaultVariant"`
|
||||
Variants map[string]any `json:"variants"`
|
||||
Targeting json.RawMessage `json:"targeting,omitempty"`
|
||||
Source string `json:"source"`
|
||||
Selector string `json:"selector"`
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Evaluators struct {
|
||||
Evaluators map[string]json.RawMessage `json:"$evaluators"`
|
||||
}
|
||||
|
||||
type Metadata = map[string]interface{}
|
||||
|
|
|
@ -1,52 +0,0 @@
|
|||
package notifications
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
const typeField = "type"
|
||||
|
||||
// Use to represent change notifications for mode PROVIDER_CONFIGURATION_CHANGE events.
|
||||
type Notifications map[string]any
|
||||
|
||||
// Generate notifications (deltas) from old and new flag sets for use in RPC mode PROVIDER_CONFIGURATION_CHANGE events.
|
||||
func NewFromFlags(oldFlags, newFlags map[string]model.Flag) Notifications {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
// flags removed
|
||||
for key := range oldFlags {
|
||||
if _, ok := newFlags[key]; !ok {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationDelete),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// flags added or modified
|
||||
for key, newFlag := range newFlags {
|
||||
oldFlag, exists := oldFlags[key]
|
||||
if !exists {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationCreate),
|
||||
}
|
||||
} else if !flagsEqual(oldFlag, newFlag) {
|
||||
notifications[key] = map[string]interface{}{
|
||||
typeField: string(model.NotificationUpdate),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
func flagsEqual(a, b model.Flag) bool {
|
||||
return a.State == b.State &&
|
||||
a.DefaultVariant == b.DefaultVariant &&
|
||||
reflect.DeepEqual(a.Variants, b.Variants) &&
|
||||
reflect.DeepEqual(a.Targeting, b.Targeting) &&
|
||||
a.Source == b.Source &&
|
||||
a.Selector == b.Selector &&
|
||||
reflect.DeepEqual(a.Metadata, b.Metadata)
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
package notifications
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewFromFlags(t *testing.T) {
|
||||
flagA := model.Flag{
|
||||
Key: "flagA",
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "on",
|
||||
Source: "source1",
|
||||
}
|
||||
flagAUpdated := model.Flag{
|
||||
Key: "flagA",
|
||||
State: "DISABLED",
|
||||
DefaultVariant: "on",
|
||||
Source: "source1",
|
||||
}
|
||||
flagB := model.Flag{
|
||||
Key: "flagB",
|
||||
State: "ENABLED",
|
||||
DefaultVariant: "off",
|
||||
Source: "source1",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
oldFlags map[string]model.Flag
|
||||
newFlags map[string]model.Flag
|
||||
want Notifications
|
||||
}{
|
||||
{
|
||||
name: "flag added",
|
||||
oldFlags: map[string]model.Flag{},
|
||||
newFlags: map[string]model.Flag{"flagA": flagA},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag deleted",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag changed",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{"flagA": flagAUpdated},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flag unchanged",
|
||||
oldFlags: map[string]model.Flag{"flagA": flagA},
|
||||
newFlags: map[string]model.Flag{"flagA": flagA},
|
||||
want: Notifications{},
|
||||
},
|
||||
{
|
||||
name: "mixed changes",
|
||||
oldFlags: map[string]model.Flag{
|
||||
"flagA": flagA,
|
||||
"flagB": flagB,
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"flagA": flagAUpdated, // updated
|
||||
"flagC": flagA, // added
|
||||
},
|
||||
want: Notifications{
|
||||
"flagA": map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
},
|
||||
"flagB": map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
},
|
||||
"flagC": map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := NewFromFlags(tt.oldFlags, tt.newFlags)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -2,7 +2,6 @@ package service
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"connectrpc.com/connect"
|
||||
)
|
||||
|
@ -33,9 +32,6 @@ type Configuration struct {
|
|||
SocketPath string
|
||||
CORS []string
|
||||
Options []connect.HandlerOption
|
||||
ContextValues map[string]any
|
||||
HeaderToContextKeyMappings map[string]string
|
||||
StreamDeadline time.Duration
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -16,35 +16,32 @@ type EvaluationSuccess struct {
|
|||
Key string `json:"key"`
|
||||
Reason string `json:"reason"`
|
||||
Variant string `json:"variant"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
}
|
||||
|
||||
type BulkEvaluationResponse struct {
|
||||
Flags []interface{} `json:"flags"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
}
|
||||
|
||||
type EvaluationError struct {
|
||||
Key string `json:"key"`
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
}
|
||||
|
||||
type BulkEvaluationError struct {
|
||||
ErrorCode string `json:"errorCode"`
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
Metadata model.Metadata `json:"metadata"`
|
||||
}
|
||||
|
||||
type InternalError struct {
|
||||
ErrorDetails string `json:"errorDetails"`
|
||||
}
|
||||
|
||||
func BulkEvaluationResponseFrom(resolutions []evaluator.AnyValue, metadata model.Metadata) BulkEvaluationResponse {
|
||||
func BulkEvaluationResponseFrom(values []evaluator.AnyValue) BulkEvaluationResponse {
|
||||
evaluations := make([]interface{}, 0)
|
||||
|
||||
for _, value := range resolutions {
|
||||
for _, value := range values {
|
||||
if value.Error != nil {
|
||||
_, evaluationError := EvaluationErrorResponseFrom(value)
|
||||
evaluations = append(evaluations, evaluationError)
|
||||
|
@ -55,7 +52,6 @@ func BulkEvaluationResponseFrom(resolutions []evaluator.AnyValue, metadata model
|
|||
|
||||
return BulkEvaluationResponse{
|
||||
evaluations,
|
||||
metadata,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -94,7 +90,6 @@ func BulkEvaluationContextErrorFrom(code string, details string) BulkEvaluationE
|
|||
func EvaluationErrorResponseFrom(result evaluator.AnyValue) (int, EvaluationError) {
|
||||
payload := EvaluationError{
|
||||
Key: result.FlagKey,
|
||||
Metadata: result.Metadata,
|
||||
}
|
||||
|
||||
status := 400
|
||||
|
|
|
@ -55,7 +55,7 @@ func TestBulkEvaluationResponse(t *testing.T) {
|
|||
{
|
||||
name: "empty input",
|
||||
input: nil,
|
||||
marshalledOutput: "{\"flags\":[],\"metadata\":{}}",
|
||||
marshalledOutput: "{\"flags\":[]}",
|
||||
},
|
||||
{
|
||||
name: "valid values",
|
||||
|
@ -75,16 +75,15 @@ func TestBulkEvaluationResponse(t *testing.T) {
|
|||
Reason: model.ErrorReason,
|
||||
FlagKey: "errorFlag",
|
||||
Error: errors.New(model.FlagNotFoundErrorCode),
|
||||
Metadata: map[string]interface{}{},
|
||||
},
|
||||
},
|
||||
marshalledOutput: "{\"flags\":[{\"value\":false,\"key\":\"key\",\"reason\":\"STATIC\",\"variant\":\"false\",\"metadata\":{\"key\":\"value\"}},{\"key\":\"errorFlag\",\"errorCode\":\"FLAG_NOT_FOUND\",\"errorDetails\":\"flag `errorFlag` does not exist\",\"metadata\":{}}],\"metadata\":{}}",
|
||||
marshalledOutput: "{\"flags\":[{\"value\":false,\"key\":\"key\",\"reason\":\"STATIC\",\"variant\":\"false\",\"metadata\":{\"key\":\"value\"}},{\"key\":\"errorFlag\",\"errorCode\":\"FLAG_NOT_FOUND\",\"errorDetails\":\"flag `errorFlag` does not exist\"}]}",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
response := BulkEvaluationResponseFrom(test.input, model.Metadata{})
|
||||
response := BulkEvaluationResponseFrom(test.input)
|
||||
|
||||
marshal, err := json.Marshal(response)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,304 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
type IStore interface {
|
||||
GetAll(ctx context.Context) (map[string]model.Flag, error)
|
||||
Get(ctx context.Context, key string) (model.Flag, bool)
|
||||
SelectorForFlag(ctx context.Context, flag model.Flag) string
|
||||
}
|
||||
|
||||
type Flags struct {
|
||||
mx sync.RWMutex
|
||||
Flags map[string]model.Flag `json:"flags"`
|
||||
FlagSources []string
|
||||
SourceMetadata map[string]SourceDetails
|
||||
}
|
||||
|
||||
type SourceDetails struct {
|
||||
Source string
|
||||
Selector string
|
||||
}
|
||||
|
||||
func (f *Flags) hasPriority(stored string, new string) bool {
|
||||
if stored == new {
|
||||
return true
|
||||
}
|
||||
for i := len(f.FlagSources) - 1; i >= 0; i-- {
|
||||
switch f.FlagSources[i] {
|
||||
case stored:
|
||||
return false
|
||||
case new:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func NewFlags() *Flags {
|
||||
return &Flags{
|
||||
Flags: map[string]model.Flag{},
|
||||
SourceMetadata: map[string]SourceDetails{},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Flags) Set(key string, flag model.Flag) {
|
||||
f.mx.Lock()
|
||||
defer f.mx.Unlock()
|
||||
f.Flags[key] = flag
|
||||
}
|
||||
|
||||
func (f *Flags) Get(_ context.Context, key string) (model.Flag, bool) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
flag, ok := f.Flags[key]
|
||||
|
||||
return flag, ok
|
||||
}
|
||||
|
||||
func (f *Flags) SelectorForFlag(_ context.Context, flag model.Flag) string {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
|
||||
return f.SourceMetadata[flag.Source].Selector
|
||||
}
|
||||
|
||||
func (f *Flags) Delete(key string) {
|
||||
f.mx.Lock()
|
||||
defer f.mx.Unlock()
|
||||
delete(f.Flags, key)
|
||||
}
|
||||
|
||||
func (f *Flags) String() (string, error) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
bytes, err := json.Marshal(f)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to marshal flags: %w", err)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// GetAll returns a copy of the store's state (copy in order to be concurrency safe)
|
||||
func (f *Flags) GetAll(_ context.Context) (map[string]model.Flag, error) {
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
state := make(map[string]model.Flag, len(f.Flags))
|
||||
|
||||
for key, flag := range f.Flags {
|
||||
state[key] = flag
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// Add new flags from source.
|
||||
func (f *Flags) Add(logger *logger.Logger, source string, selector string, flags map[string]model.Flag,
|
||||
) map[string]interface{} {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
for k, newFlag := range flags {
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if ok && !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not overwriting: flag %s from source %s does not have priority over %s",
|
||||
k,
|
||||
source,
|
||||
storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
// Store the new version of the flag
|
||||
newFlag.Source = source
|
||||
newFlag.Selector = selector
|
||||
f.Set(k, newFlag)
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// Update existing flags from source.
|
||||
func (f *Flags) Update(logger *logger.Logger, source string, selector string, flags map[string]model.Flag,
|
||||
) map[string]interface{} {
|
||||
notifications := map[string]interface{}{}
|
||||
|
||||
for k, flag := range flags {
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if !ok {
|
||||
logger.Warn(
|
||||
fmt.Sprintf("failed to update the flag, flag with key %s from source %s does not exist.",
|
||||
k,
|
||||
source))
|
||||
|
||||
continue
|
||||
}
|
||||
if !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not updating: flag %s from source %s does not have priority over %s",
|
||||
k,
|
||||
source,
|
||||
storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
flag.Source = source
|
||||
flag.Selector = selector
|
||||
f.Set(k, flag)
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// DeleteFlags matching flags from source.
|
||||
func (f *Flags) DeleteFlags(logger *logger.Logger, source string, flags map[string]model.Flag) map[string]interface{} {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"store resync triggered: delete event from source %s",
|
||||
source,
|
||||
),
|
||||
)
|
||||
ctx := context.Background()
|
||||
|
||||
notifications := map[string]interface{}{}
|
||||
if len(flags) == 0 {
|
||||
allFlags, err := f.GetAll(ctx)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Sprintf("error while retrieving flags from the store: %v", err))
|
||||
return notifications
|
||||
}
|
||||
|
||||
for key, flag := range allFlags {
|
||||
if flag.Source != source {
|
||||
continue
|
||||
}
|
||||
notifications[key] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
f.Delete(key)
|
||||
}
|
||||
}
|
||||
|
||||
for k := range flags {
|
||||
flag, ok := f.Get(ctx, k)
|
||||
if ok {
|
||||
if !f.hasPriority(flag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not deleting: flag %s from source %s cannot be deleted by %s",
|
||||
k,
|
||||
flag.Source,
|
||||
source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
|
||||
f.Delete(k)
|
||||
} else {
|
||||
logger.Warn(
|
||||
fmt.Sprintf("failed to remove flag, flag with key %s from source %s does not exist.",
|
||||
k,
|
||||
source))
|
||||
}
|
||||
}
|
||||
|
||||
return notifications
|
||||
}
|
||||
|
||||
// Merge provided flags from source with currently stored flags.
|
||||
// nolint: funlen
|
||||
func (f *Flags) Merge(
|
||||
logger *logger.Logger,
|
||||
source string,
|
||||
selector string,
|
||||
flags map[string]model.Flag,
|
||||
) (map[string]interface{}, bool) {
|
||||
notifications := map[string]interface{}{}
|
||||
resyncRequired := false
|
||||
f.mx.Lock()
|
||||
for k, v := range f.Flags {
|
||||
if v.Source == source && v.Selector == selector {
|
||||
if _, ok := flags[k]; !ok {
|
||||
// flag has been deleted
|
||||
delete(f.Flags, k)
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationDelete),
|
||||
"source": source,
|
||||
}
|
||||
resyncRequired = true
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"store resync triggered: flag %s has been deleted from source %s",
|
||||
k, source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
f.mx.Unlock()
|
||||
for k, newFlag := range flags {
|
||||
newFlag.Source = source
|
||||
newFlag.Selector = selector
|
||||
storedFlag, ok := f.Get(context.Background(), k)
|
||||
if ok {
|
||||
if !f.hasPriority(storedFlag.Source, source) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"not merging: flag %s from source %s does not have priority over %s",
|
||||
k, source, storedFlag.Source,
|
||||
),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if reflect.DeepEqual(storedFlag, newFlag) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationCreate),
|
||||
"source": source,
|
||||
}
|
||||
} else {
|
||||
notifications[k] = map[string]interface{}{
|
||||
"type": string(model.NotificationUpdate),
|
||||
"source": source,
|
||||
}
|
||||
}
|
||||
// Store the new version of the flag
|
||||
f.Set(k, newFlag)
|
||||
}
|
||||
return notifications, resyncRequired
|
||||
}
|
|
@ -0,0 +1,545 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHasPriority(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
currentState *Flags
|
||||
storedSource string
|
||||
newSource string
|
||||
hasPriority bool
|
||||
}{
|
||||
{
|
||||
name: "same source",
|
||||
currentState: &Flags{},
|
||||
storedSource: "A",
|
||||
newSource: "A",
|
||||
hasPriority: true,
|
||||
},
|
||||
{
|
||||
name: "no priority",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
},
|
||||
storedSource: "A",
|
||||
newSource: "B",
|
||||
hasPriority: false,
|
||||
},
|
||||
{
|
||||
name: "priority",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"A",
|
||||
"B",
|
||||
},
|
||||
},
|
||||
storedSource: "A",
|
||||
newSource: "B",
|
||||
hasPriority: true,
|
||||
},
|
||||
{
|
||||
name: "not in sources",
|
||||
currentState: &Flags{
|
||||
FlagSources: []string{
|
||||
"A",
|
||||
"B",
|
||||
},
|
||||
},
|
||||
storedSource: "C",
|
||||
newSource: "D",
|
||||
hasPriority: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p := tt.currentState.hasPriority(tt.storedSource, tt.newSource)
|
||||
require.Equal(t, p, tt.hasPriority)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeFlags(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
current *Flags
|
||||
new map[string]model.Flag
|
||||
newSource string
|
||||
newSelector string
|
||||
want *Flags
|
||||
wantNotifs map[string]interface{}
|
||||
wantResync bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
current: &Flags{Flags: nil},
|
||||
new: nil,
|
||||
want: &Flags{Flags: nil},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "both empty flags",
|
||||
current: &Flags{Flags: map[string]model.Flag{}},
|
||||
new: map[string]model.Flag{},
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "empty new",
|
||||
current: &Flags{Flags: map[string]model.Flag{}},
|
||||
new: nil,
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "merging with new source",
|
||||
current: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"waka": {
|
||||
DefaultVariant: "off",
|
||||
Source: "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
new: map[string]model.Flag{
|
||||
"paka": {
|
||||
DefaultVariant: "on",
|
||||
},
|
||||
},
|
||||
newSource: "2",
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {
|
||||
DefaultVariant: "off",
|
||||
Source: "1",
|
||||
},
|
||||
"paka": {
|
||||
DefaultVariant: "on",
|
||||
Source: "2",
|
||||
},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{"paka": map[string]interface{}{"type": "write", "source": "2"}},
|
||||
},
|
||||
{
|
||||
name: "override by new update",
|
||||
current: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
"paka": {DefaultVariant: "off"},
|
||||
}},
|
||||
new: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on"},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"waka": map[string]interface{}{"type": "update", "source": ""},
|
||||
"paka": map[string]interface{}{"type": "update", "source": ""},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "identical update so empty notifications",
|
||||
current: &Flags{
|
||||
Flags: map[string]model.Flag{"hello": {DefaultVariant: "off"}},
|
||||
},
|
||||
new: map[string]model.Flag{
|
||||
"hello": {DefaultVariant: "off"},
|
||||
},
|
||||
want: &Flags{Flags: map[string]model.Flag{
|
||||
"hello": {DefaultVariant: "off"},
|
||||
}},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "deleted flag & trigger resync for same source",
|
||||
current: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A"}}},
|
||||
new: map[string]model.Flag{},
|
||||
newSource: "A",
|
||||
want: &Flags{Flags: map[string]model.Flag{}},
|
||||
wantNotifs: map[string]interface{}{"hello": map[string]interface{}{"type": "delete", "source": "A"}},
|
||||
wantResync: true,
|
||||
},
|
||||
{
|
||||
name: "no deleted & no resync for same source but different selector",
|
||||
current: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A", Selector: "X"}}},
|
||||
new: map[string]model.Flag{},
|
||||
newSource: "A",
|
||||
newSelector: "Y",
|
||||
want: &Flags{Flags: map[string]model.Flag{"hello": {DefaultVariant: "off", Source: "A", Selector: "X"}}},
|
||||
wantResync: false,
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "no merge due to low priority",
|
||||
current: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
Flags: map[string]model.Flag{
|
||||
"hello": {
|
||||
DefaultVariant: "off",
|
||||
Source: "A",
|
||||
},
|
||||
},
|
||||
},
|
||||
new: map[string]model.Flag{"hello": {DefaultVariant: "off"}},
|
||||
newSource: "B",
|
||||
want: &Flags{
|
||||
FlagSources: []string{
|
||||
"B",
|
||||
"A",
|
||||
},
|
||||
Flags: map[string]model.Flag{
|
||||
"hello": {
|
||||
DefaultVariant: "off",
|
||||
Source: "A",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
gotNotifs, resyncRequired := tt.current.Merge(logger.NewLogger(nil, false), tt.newSource, tt.newSelector, tt.new)
|
||||
|
||||
require.True(t, reflect.DeepEqual(tt.want, tt.current))
|
||||
require.Equal(t, tt.wantNotifs, gotNotifs)
|
||||
require.Equal(t, tt.wantResync, resyncRequired)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Add(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockOverrideSource := "source-2"
|
||||
|
||||
type request struct {
|
||||
source string
|
||||
selector string
|
||||
flags map[string]model.Flag
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
addRequest request
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Add success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"B"},
|
||||
},
|
||||
{
|
||||
name: "Add multiple success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"B", "C"},
|
||||
},
|
||||
{
|
||||
name: "Add success - conflict and override",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
addRequest: request{
|
||||
source: mockOverrideSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.Add(mockLogger, tt.addRequest.source, tt.addRequest.selector, tt.addRequest.flags)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Update(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockOverrideSource := "source-2"
|
||||
|
||||
type request struct {
|
||||
source string
|
||||
selector string
|
||||
flags map[string]model.Flag
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
UpdateRequest request
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Update success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Update multiple success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
"B": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
"B": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "False"},
|
||||
"B": {Source: mockSource, DefaultVariant: "False"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A", "B"},
|
||||
},
|
||||
{
|
||||
name: "Update success - conflict and override",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockOverrideSource,
|
||||
flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockOverrideSource, DefaultVariant: "True"},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Update fail",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
UpdateRequest: request{
|
||||
source: mockSource,
|
||||
flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.Update(mockLogger, tt.UpdateRequest.source,
|
||||
tt.UpdateRequest.selector, tt.UpdateRequest.flags)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlags_Delete(t *testing.T) {
|
||||
mockLogger := logger.NewLogger(nil, false)
|
||||
mockSource := "source"
|
||||
mockSource2 := "source2"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
storedState *Flags
|
||||
deleteRequest map[string]model.Flag
|
||||
expectedState *Flags
|
||||
expectedNotificationKeys []string
|
||||
}{
|
||||
{
|
||||
name: "Remove success",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A"},
|
||||
},
|
||||
{
|
||||
name: "Nothing to remove",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{
|
||||
"C": {Source: mockSource},
|
||||
},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
FlagSources: []string{
|
||||
mockSource,
|
||||
mockSource2,
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{},
|
||||
},
|
||||
{
|
||||
name: "Remove all",
|
||||
storedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"A": {Source: mockSource},
|
||||
"B": {Source: mockSource},
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
},
|
||||
deleteRequest: map[string]model.Flag{},
|
||||
expectedState: &Flags{
|
||||
Flags: map[string]model.Flag{
|
||||
"C": {Source: mockSource2},
|
||||
},
|
||||
},
|
||||
expectedNotificationKeys: []string{"A", "B"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
messages := tt.storedState.DeleteFlags(mockLogger, mockSource, tt.deleteRequest)
|
||||
|
||||
require.Equal(t, tt.storedState, tt.expectedState)
|
||||
|
||||
for k := range messages {
|
||||
require.Containsf(t, tt.expectedNotificationKeys, k,
|
||||
"Message key %s not present in the expected key list", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,133 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
uuid "github.com/google/uuid"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
// flags table and index constants
|
||||
const flagsTable = "flags"
|
||||
|
||||
const idIndex = "id"
|
||||
const keyIndex = "key"
|
||||
const sourceIndex = "source"
|
||||
const priorityIndex = "priority"
|
||||
const flagSetIdIndex = "flagSetId"
|
||||
|
||||
// compound indices; maintain sub-indexes alphabetically; order matters; these must match what's generated in the SelectorMapToQuery func.
|
||||
const flagSetIdSourceCompoundIndex = flagSetIdIndex + "+" + sourceIndex
|
||||
const keySourceCompoundIndex = keyIndex + "+" + sourceIndex
|
||||
const flagSetIdKeySourceCompoundIndex = flagSetIdIndex + "+" + keyIndex + "+" + sourceIndex
|
||||
|
||||
// flagSetId defaults to a UUID generated at startup to make our queries consistent
|
||||
// any flag without a "flagSetId" is assigned this one; it's never exposed externally
|
||||
var nilFlagSetId = uuid.New().String()
|
||||
|
||||
// A selector represents a set of constraints used to query the store.
|
||||
type Selector struct {
|
||||
indexMap map[string]string
|
||||
}
|
||||
|
||||
// NewSelector creates a new Selector from a selector expression string.
|
||||
// For example, to select flags from source "./mySource" and flagSetId "1234", use the expression:
|
||||
// "source=./mySource,flagSetId=1234"
|
||||
func NewSelector(selectorExpression string) Selector {
|
||||
return Selector{
|
||||
indexMap: expressionToMap(selectorExpression),
|
||||
}
|
||||
}
|
||||
|
||||
func expressionToMap(sExp string) map[string]string {
|
||||
selectorMap := make(map[string]string)
|
||||
if sExp == "" {
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
if strings.Index(sExp, "=") == -1 {
|
||||
// if no '=' is found, treat the whole string as as source (backwards compatibility)
|
||||
// we may may support interpreting this as a flagSetId in the future as an option
|
||||
selectorMap[sourceIndex] = sExp
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
// Split the selector by commas
|
||||
pairs := strings.Split(sExp, ",")
|
||||
for _, pair := range pairs {
|
||||
// Split each pair by the first equal sign
|
||||
parts := strings.Split(pair, "=")
|
||||
if len(parts) == 2 {
|
||||
key := parts[0]
|
||||
value := parts[1]
|
||||
selectorMap[key] = value
|
||||
}
|
||||
}
|
||||
return selectorMap
|
||||
}
|
||||
|
||||
func (s Selector) WithIndex(key string, value string) Selector {
|
||||
m := maps.Clone(s.indexMap)
|
||||
m[key] = value
|
||||
return Selector{
|
||||
indexMap: m,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Selector) IsEmpty() bool {
|
||||
return s == nil || len(s.indexMap) == 0
|
||||
}
|
||||
|
||||
// SelectorMapToQuery converts the selector map to an indexId and constraints for querying the store.
|
||||
// For a given index, a specific order and number of constraints are required.
|
||||
// Both the indexId and constraints are generated based on the keys present in the selector's internal map.
|
||||
func (s Selector) ToQuery() (indexId string, constraints []interface{}) {
|
||||
|
||||
if len(s.indexMap) == 2 && s.indexMap[flagSetIdIndex] != "" && s.indexMap[keyIndex] != "" {
|
||||
// special case for flagSetId and key (this is the "id" index)
|
||||
return idIndex, []interface{}{s.indexMap[flagSetIdIndex], s.indexMap[keyIndex]}
|
||||
}
|
||||
|
||||
qs := []string{}
|
||||
keys := make([]string, 0, len(s.indexMap))
|
||||
|
||||
for key := range s.indexMap {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
indexId += key + "+"
|
||||
qs = append(qs, s.indexMap[key])
|
||||
}
|
||||
|
||||
indexId = strings.TrimSuffix(indexId, "+")
|
||||
// Convert []string to []interface{}
|
||||
c := make([]interface{}, 0, len(qs))
|
||||
for _, v := range qs {
|
||||
c = append(c, v)
|
||||
}
|
||||
constraints = c
|
||||
|
||||
return indexId, constraints
|
||||
}
|
||||
|
||||
// SelectorToMetadata converts the selector's internal map to metadata for logging or tracing purposes.
|
||||
// Only includes known indices to avoid leaking sensitive information, and is usually returned as the "top level" metadata
|
||||
func (s *Selector) ToMetadata() model.Metadata {
|
||||
meta := model.Metadata{}
|
||||
|
||||
if s == nil || s.indexMap == nil {
|
||||
return meta
|
||||
}
|
||||
|
||||
if s.indexMap[flagSetIdIndex] != "" {
|
||||
meta[flagSetIdIndex] = s.indexMap[flagSetIdIndex]
|
||||
}
|
||||
if s.indexMap[sourceIndex] != "" {
|
||||
meta[sourceIndex] = s.indexMap[sourceIndex]
|
||||
}
|
||||
return meta
|
||||
}
|
|
@ -1,193 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
)
|
||||
|
||||
func TestSelector_IsEmpty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantEmpty bool
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "nil indexMap",
|
||||
selector: &Selector{indexMap: nil},
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{}},
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "non-empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{"source": "abc"}},
|
||||
wantEmpty: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.selector.IsEmpty()
|
||||
if got != tt.wantEmpty {
|
||||
t.Errorf("IsEmpty() = %v, want %v", got, tt.wantEmpty)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_WithIndex(t *testing.T) {
|
||||
oldS := Selector{indexMap: map[string]string{"source": "abc"}}
|
||||
newS := oldS.WithIndex("flagSetId", "1234")
|
||||
|
||||
if newS.indexMap["source"] != "abc" {
|
||||
t.Errorf("WithIndex did not preserve existing keys")
|
||||
}
|
||||
if newS.indexMap["flagSetId"] != "1234" {
|
||||
t.Errorf("WithIndex did not add new key")
|
||||
}
|
||||
// Ensure original is unchanged
|
||||
if _, ok := oldS.indexMap["flagSetId"]; ok {
|
||||
t.Errorf("WithIndex mutated original selector")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_ToQuery(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector Selector
|
||||
wantIndex string
|
||||
wantConstr []interface{}
|
||||
}{
|
||||
{
|
||||
name: "flagSetId and key primary index special case",
|
||||
selector: Selector{indexMap: map[string]string{"flagSetId": "fsid", "key": "myKey"}},
|
||||
wantIndex: "id",
|
||||
wantConstr: []interface{}{"fsid", "myKey"},
|
||||
},
|
||||
{
|
||||
name: "multiple keys sorted",
|
||||
selector: Selector{indexMap: map[string]string{"source": "src", "flagSetId": "fsid"}},
|
||||
wantIndex: "flagSetId+source",
|
||||
wantConstr: []interface{}{"fsid", "src"},
|
||||
},
|
||||
{
|
||||
name: "single key",
|
||||
selector: Selector{indexMap: map[string]string{"source": "src"}},
|
||||
wantIndex: "source",
|
||||
wantConstr: []interface{}{"src"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotIndex, gotConstr := tt.selector.ToQuery()
|
||||
if gotIndex != tt.wantIndex {
|
||||
t.Errorf("ToQuery() index = %v, want %v", gotIndex, tt.wantIndex)
|
||||
}
|
||||
if !reflect.DeepEqual(gotConstr, tt.wantConstr) {
|
||||
t.Errorf("ToQuery() constraints = %v, want %v", gotConstr, tt.wantConstr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelector_ToMetadata(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
want model.Metadata
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "nil indexMap",
|
||||
selector: &Selector{indexMap: nil},
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "empty indexMap",
|
||||
selector: &Selector{indexMap: map[string]string{}},
|
||||
want: model.Metadata{},
|
||||
},
|
||||
{
|
||||
name: "flagSetId only",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid"}},
|
||||
want: model.Metadata{"flagSetId": "fsid"},
|
||||
},
|
||||
{
|
||||
name: "source only",
|
||||
selector: &Selector{indexMap: map[string]string{"source": "src"}},
|
||||
want: model.Metadata{"source": "src"},
|
||||
},
|
||||
{
|
||||
name: "flagSetId and source",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid", "source": "src"}},
|
||||
want: model.Metadata{"flagSetId": "fsid", "source": "src"},
|
||||
},
|
||||
{
|
||||
name: "flagSetId, source, and key (key should be ignored)",
|
||||
selector: &Selector{indexMap: map[string]string{"flagSetId": "fsid", "source": "src", "key": "myKey"}},
|
||||
want: model.Metadata{"flagSetId": "fsid", "source": "src"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.selector.ToMetadata()
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("ToMetadata() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewSelector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantMap map[string]string
|
||||
}{
|
||||
{
|
||||
name: "source and flagSetId",
|
||||
input: "source=abc,flagSetId=1234",
|
||||
wantMap: map[string]string{"source": "abc", "flagSetId": "1234"},
|
||||
},
|
||||
{
|
||||
name: "source",
|
||||
input: "source=abc",
|
||||
wantMap: map[string]string{"source": "abc"},
|
||||
},
|
||||
{
|
||||
name: "no equals, treat as source",
|
||||
input: "mysource",
|
||||
wantMap: map[string]string{"source": "mysource"},
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
wantMap: map[string]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := NewSelector(tt.input)
|
||||
if !reflect.DeepEqual(s.indexMap, tt.wantMap) {
|
||||
t.Errorf("NewSelector(%q) indexMap = %v, want %v", tt.input, s.indexMap, tt.wantMap)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,396 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/open-feature/flagd/core/pkg/notifications"
|
||||
)
|
||||
|
||||
var noValidatedSources = []string{}
|
||||
|
||||
type SelectorContextKey struct{}
|
||||
|
||||
type FlagQueryResult struct {
|
||||
Flags map[string]model.Flag
|
||||
}
|
||||
|
||||
type IStore interface {
|
||||
Get(ctx context.Context, key string, selector *Selector) (model.Flag, model.Metadata, error)
|
||||
GetAll(ctx context.Context, selector *Selector) (map[string]model.Flag, model.Metadata, error)
|
||||
Watch(ctx context.Context, selector *Selector, watcher chan<- FlagQueryResult)
|
||||
}
|
||||
|
||||
var _ IStore = (*Store)(nil)
|
||||
|
||||
type Store struct {
|
||||
mx sync.RWMutex
|
||||
db *memdb.MemDB
|
||||
logger *logger.Logger
|
||||
sources []string
|
||||
// deprecated: has no effect and will be removed soon.
|
||||
FlagSources []string
|
||||
}
|
||||
|
||||
type SourceDetails struct {
|
||||
Source string
|
||||
Selector string
|
||||
}
|
||||
|
||||
// NewStore creates a new in-memory store with the given sources.
|
||||
// The order of sources in the slice determines their priority, when queries result in duplicate flags (queries without source or flagSetId), the higher priority source "wins".
|
||||
func NewStore(logger *logger.Logger, sources []string) (*Store, error) {
|
||||
|
||||
// a unique index must exist for each set of constraints - for example, to look up by key and source, we need a compound index on key+source, etc
|
||||
// we maybe want to generate these dynamically in the future to support more robust querying, but for now we will hardcode the ones we need
|
||||
schema := &memdb.DBSchema{
|
||||
Tables: map[string]*memdb.TableSchema{
|
||||
flagsTable: {
|
||||
Name: flagsTable,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
// primary index; must be unique and named "id"
|
||||
idIndex: {
|
||||
Name: idIndex,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
// for looking up by source
|
||||
sourceIndex: {
|
||||
Name: sourceIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
// for looking up by priority, used to maintain highest priority flag when there are duplicates and no selector is provided
|
||||
priorityIndex: {
|
||||
Name: priorityIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.IntFieldIndex{Field: model.Priority},
|
||||
},
|
||||
// for looking up by flagSetId
|
||||
flagSetIdIndex: {
|
||||
Name: flagSetIdIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
},
|
||||
keyIndex: {
|
||||
Name: keyIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
},
|
||||
flagSetIdSourceCompoundIndex: {
|
||||
Name: flagSetIdSourceCompoundIndex,
|
||||
Unique: false,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
keySourceCompoundIndex: {
|
||||
Name: keySourceCompoundIndex,
|
||||
Unique: false, // duplicate from a single source ARE allowed (they just must have different flag sets)
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
// used to query all flags from a specific source so we know which flags to delete if a flag is missing from a source
|
||||
flagSetIdKeySourceCompoundIndex: {
|
||||
Name: flagSetIdKeySourceCompoundIndex,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{Field: model.FlagSetId, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Key, Lowercase: false},
|
||||
&memdb.StringFieldIndex{Field: model.Source, Lowercase: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a new data base
|
||||
db, err := memdb.NewMemDB(schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to initialize flag database: %w", err)
|
||||
}
|
||||
|
||||
// clone the sources to avoid modifying the original slice
|
||||
s := slices.Clone(sources)
|
||||
|
||||
return &Store{
|
||||
sources: s,
|
||||
db: db,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Deprecated: use NewStore instead - will be removed very soon.
|
||||
func NewFlags() *Store {
|
||||
state, err := NewStore(logger.NewLogger(nil, false), noValidatedSources)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unable to create flag store: %v", err))
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
func (s *Store) Get(_ context.Context, key string, selector *Selector) (model.Flag, model.Metadata, error) {
|
||||
s.logger.Debug(fmt.Sprintf("getting flag %s", key))
|
||||
txn := s.db.Txn(false)
|
||||
queryMeta := selector.ToMetadata()
|
||||
|
||||
// if present, use the selector to query the flags
|
||||
if !selector.IsEmpty() {
|
||||
selector := selector.WithIndex("key", key)
|
||||
indexId, constraints := selector.ToQuery()
|
||||
s.logger.Debug(fmt.Sprintf("getting flag with query: %s, %v", indexId, constraints))
|
||||
raw, err := txn.First(flagsTable, indexId, constraints...)
|
||||
flag, ok := raw.(model.Flag)
|
||||
if err != nil {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s not found: %w", key, err)
|
||||
}
|
||||
if !ok {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s is not a valid flag", key)
|
||||
}
|
||||
return flag, queryMeta, nil
|
||||
|
||||
}
|
||||
// otherwise, get all flags with the given key, and keep the last one with the highest priority
|
||||
s.logger.Debug(fmt.Sprintf("getting highest priority flag with key: %s", key))
|
||||
it, err := txn.Get(flagsTable, keyIndex, key)
|
||||
if err != nil {
|
||||
return model.Flag{}, queryMeta, fmt.Errorf("flag %s not found: %w", key, err)
|
||||
}
|
||||
flag := model.Flag{}
|
||||
found := false
|
||||
for raw := it.Next(); raw != nil; raw = it.Next() {
|
||||
nextFlag, ok := raw.(model.Flag)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
found = true
|
||||
if nextFlag.Priority >= flag.Priority {
|
||||
flag = nextFlag
|
||||
} else {
|
||||
s.logger.Debug(fmt.Sprintf("discarding flag %s from lower priority source %s in favor of flag from source %s", nextFlag.Key, s.sources[nextFlag.Priority], s.sources[flag.Priority]))
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return flag, queryMeta, fmt.Errorf("flag %s not found", key)
|
||||
}
|
||||
return flag, queryMeta, nil
|
||||
}
|
||||
|
||||
func (f *Store) String() (string, error) {
|
||||
f.logger.Debug("dumping flags to string")
|
||||
f.mx.RLock()
|
||||
defer f.mx.RUnlock()
|
||||
|
||||
state, _, err := f.GetAll(context.Background(), nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to get all flags: %w", err)
|
||||
}
|
||||
|
||||
bytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to marshal flags: %w", err)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// GetAll returns a copy of the store's state (copy in order to be concurrency safe)
|
||||
func (s *Store) GetAll(ctx context.Context, selector *Selector) (map[string]model.Flag, model.Metadata, error) {
|
||||
flags := make(map[string]model.Flag)
|
||||
queryMeta := selector.ToMetadata()
|
||||
it, err := s.selectOrAll(selector)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("flag query error: %v", err))
|
||||
return flags, queryMeta, err
|
||||
}
|
||||
flags = s.collect(it)
|
||||
return flags, queryMeta, nil
|
||||
}
|
||||
|
||||
// Update the flag state with the provided flags.
|
||||
func (s *Store) Update(
|
||||
source string,
|
||||
flags map[string]model.Flag,
|
||||
metadata model.Metadata,
|
||||
) (map[string]interface{}, bool) {
|
||||
resyncRequired := false
|
||||
|
||||
if source == "" {
|
||||
panic("source cannot be empty")
|
||||
}
|
||||
|
||||
priority := slices.Index(s.sources, source)
|
||||
if priority == -1 {
|
||||
// this is a hack to allow old constructors that didn't pass sources, remove when we remove "NewFlags" constructor
|
||||
if !slices.Equal(s.sources, noValidatedSources) {
|
||||
panic(fmt.Sprintf("source %s is not registered in the store", source))
|
||||
}
|
||||
// same as above - remove when we remove "NewFlags" constructor
|
||||
priority = 0
|
||||
}
|
||||
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// get all flags for the source we are updating
|
||||
selector := NewSelector(sourceIndex + "=" + source)
|
||||
oldFlags, _, _ := s.GetAll(context.Background(), &selector)
|
||||
|
||||
s.mx.Lock()
|
||||
for key := range oldFlags {
|
||||
if _, ok := flags[key]; !ok {
|
||||
// flag has been deleted
|
||||
s.logger.Debug(fmt.Sprintf("flag %s has been deleted from source %s", key, source))
|
||||
|
||||
count, err := txn.DeleteAll(flagsTable, keySourceCompoundIndex, key, source)
|
||||
s.logger.Debug(fmt.Sprintf("deleted %d flags with key %s from source %s", count, key, source))
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error deleting flag: %s, %v", key, err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
s.mx.Unlock()
|
||||
for key, newFlag := range flags {
|
||||
s.logger.Debug(fmt.Sprintf("got metadata %v", metadata))
|
||||
|
||||
newFlag.Key = key
|
||||
newFlag.Source = source
|
||||
newFlag.Priority = priority
|
||||
newFlag.Metadata = patchMetadata(metadata, newFlag.Metadata)
|
||||
|
||||
// flagSetId defaults to a UUID generated at startup to make our queries isomorphic
|
||||
flagSetId := nilFlagSetId
|
||||
// flagSetId is inherited from the set, but can be overridden by the flag
|
||||
setFlagSetId, ok := newFlag.Metadata["flagSetId"].(string)
|
||||
if ok {
|
||||
flagSetId = setFlagSetId
|
||||
}
|
||||
newFlag.FlagSetId = flagSetId
|
||||
|
||||
raw, err := txn.First(flagsTable, keySourceCompoundIndex, key, source)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to get flag %s from source %s: %v", key, source, err))
|
||||
continue
|
||||
}
|
||||
oldFlag, ok := raw.(model.Flag)
|
||||
// If we already have a flag with the same key and source, we need to check if it has the same flagSetId
|
||||
if ok {
|
||||
if oldFlag.FlagSetId != newFlag.FlagSetId {
|
||||
// If the flagSetId is different, we need to delete the entry, since flagSetId+key represents the primary index, and it's now been changed.
|
||||
// This is important especially for clients listening to flagSetId changes, as they expect the flag to be removed from the set in this case.
|
||||
_, err = txn.DeleteAll(flagsTable, idIndex, oldFlag.FlagSetId, key)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to delete flags with key %s and flagSetId %s: %v", key, oldFlag.FlagSetId, err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// Store the new version of the flag
|
||||
s.logger.Debug(fmt.Sprintf("storing flag: %v", newFlag))
|
||||
err = txn.Insert(flagsTable, newFlag)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("unable to insert flag %s: %v", key, err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
txn.Commit()
|
||||
return notifications.NewFromFlags(oldFlags, flags), resyncRequired
|
||||
}
|
||||
|
||||
// Watch the result-set of a selector for changes, sending updates to the watcher channel.
|
||||
func (s *Store) Watch(ctx context.Context, selector *Selector, watcher chan<- FlagQueryResult) {
|
||||
go func() {
|
||||
for {
|
||||
ws := memdb.NewWatchSet()
|
||||
it, err := s.selectOrAll(selector)
|
||||
if err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error watching flags: %v", err))
|
||||
close(watcher)
|
||||
return
|
||||
}
|
||||
ws.Add(it.WatchCh())
|
||||
|
||||
flags := s.collect(it)
|
||||
watcher <- FlagQueryResult{
|
||||
Flags: flags,
|
||||
}
|
||||
|
||||
if err = ws.WatchCtx(ctx); err != nil {
|
||||
s.logger.Error(fmt.Sprintf("error watching flags: %v", err))
|
||||
close(watcher)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// returns an iterator for the given selector, or all flags if the selector is nil or empty
|
||||
func (s *Store) selectOrAll(selector *Selector) (it memdb.ResultIterator, err error) {
|
||||
txn := s.db.Txn(false)
|
||||
if !selector.IsEmpty() {
|
||||
indexId, constraints := selector.ToQuery()
|
||||
s.logger.Debug(fmt.Sprintf("getting all flags with query: %s, %v", indexId, constraints))
|
||||
return txn.Get(flagsTable, indexId, constraints...)
|
||||
} else {
|
||||
// no selector, get all flags
|
||||
return txn.Get(flagsTable, idIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// collects flags from an iterator, ensuring that only the highest priority flag is kept when there are duplicates
|
||||
func (s *Store) collect(it memdb.ResultIterator) map[string]model.Flag {
|
||||
flags := make(map[string]model.Flag)
|
||||
for raw := it.Next(); raw != nil; raw = it.Next() {
|
||||
flag := raw.(model.Flag)
|
||||
if existing, ok := flags[flag.Key]; ok {
|
||||
if flag.Priority < existing.Priority {
|
||||
s.logger.Debug(fmt.Sprintf("discarding duplicate flag %s from lower priority source %s in favor of flag from source %s", flag.Key, s.sources[flag.Priority], s.sources[existing.Priority]))
|
||||
continue // we already have a higher priority flag
|
||||
}
|
||||
s.logger.Debug(fmt.Sprintf("overwriting duplicate flag %s from lower priority source %s in favor of flag from source %s", flag.Key, s.sources[existing.Priority], s.sources[flag.Priority]))
|
||||
}
|
||||
flags[flag.Key] = flag
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
func patchMetadata(original, patch model.Metadata) model.Metadata {
|
||||
patched := make(model.Metadata)
|
||||
if original == nil && patch == nil {
|
||||
return nil
|
||||
}
|
||||
for key, value := range original {
|
||||
patched[key] = value
|
||||
}
|
||||
for key, value := range patch { // patch values overwrite m1 values on key conflict
|
||||
patched[key] = value
|
||||
}
|
||||
return patched
|
||||
}
|
|
@ -1,487 +0,0 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUpdateFlags(t *testing.T) {
|
||||
|
||||
const source1 = "source1"
|
||||
const source2 = "source2"
|
||||
var sources = []string{source1, source2}
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T) *Store
|
||||
newFlags map[string]model.Flag
|
||||
source string
|
||||
wantFlags map[string]model.Flag
|
||||
setMetadata model.Metadata
|
||||
wantNotifs map[string]interface{}
|
||||
wantResync bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: nil,
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "both empty flags",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: map[string]model.Flag{},
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "empty new",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
return s
|
||||
},
|
||||
source: source1,
|
||||
newFlags: nil,
|
||||
wantFlags: map[string]model.Flag{},
|
||||
wantNotifs: map[string]interface{}{},
|
||||
},
|
||||
{
|
||||
name: "update from source 1 (old flag removed)",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
}, nil)
|
||||
return s
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
source: source1,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source1, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"paka": map[string]interface{}{"type": "write"},
|
||||
"waka": map[string]interface{}{"type": "delete"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update from source 1 (new flag added)",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "off"},
|
||||
}, nil)
|
||||
return s
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"paka": {DefaultVariant: "on"},
|
||||
},
|
||||
source: source2,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"waka": {Key: "waka", DefaultVariant: "off", Source: source1, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source2, FlagSetId: nilFlagSetId, Priority: 1},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{"paka": map[string]interface{}{"type": "write"}},
|
||||
},
|
||||
{
|
||||
name: "flag set inheritance",
|
||||
setup: func(t *testing.T) *Store {
|
||||
s, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
s.Update(source1, map[string]model.Flag{}, model.Metadata{})
|
||||
return s
|
||||
},
|
||||
setMetadata: model.Metadata{
|
||||
"flagSetId": "topLevelSet", // top level set metadata, including flagSetId
|
||||
},
|
||||
newFlags: map[string]model.Flag{
|
||||
"waka": {DefaultVariant: "on"},
|
||||
"paka": {DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": "flagLevelSet"}}, // overrides set level flagSetId
|
||||
},
|
||||
source: source1,
|
||||
wantFlags: map[string]model.Flag{
|
||||
"waka": {Key: "waka", DefaultVariant: "on", Source: source1, FlagSetId: "topLevelSet", Priority: 0, Metadata: model.Metadata{"flagSetId": "topLevelSet"}},
|
||||
"paka": {Key: "paka", DefaultVariant: "on", Source: source1, FlagSetId: "flagLevelSet", Priority: 0, Metadata: model.Metadata{"flagSetId": "flagLevelSet"}},
|
||||
},
|
||||
wantNotifs: map[string]interface{}{
|
||||
"paka": map[string]interface{}{"type": "write"},
|
||||
"waka": map[string]interface{}{"type": "write"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
store := tt.setup(t)
|
||||
gotNotifs, resyncRequired := store.Update(tt.source, tt.newFlags, tt.setMetadata)
|
||||
gotFlags, _, _ := store.GetAll(context.Background(), nil)
|
||||
|
||||
require.Equal(t, tt.wantFlags, gotFlags)
|
||||
require.Equal(t, tt.wantNotifs, gotNotifs)
|
||||
require.Equal(t, tt.wantResync, resyncRequired)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
flagSetIdB := "flagSetIdA"
|
||||
flagSetIdC := "flagSetIdC"
|
||||
var sources = []string{sourceA, sourceB, sourceC}
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + flagSetIdC)
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
key string
|
||||
selector *Selector
|
||||
wantFlag model.Flag
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
key: "flagA",
|
||||
selector: nil,
|
||||
wantFlag: model.Flag{Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "flagSetId selector",
|
||||
key: "dupe",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlag: model.Flag{Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "source selector",
|
||||
key: "dupe",
|
||||
selector: &sourceASelector,
|
||||
wantFlag: model.Flag{Key: "dupe", DefaultVariant: "on", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "flag not found with source selector",
|
||||
key: "flagB",
|
||||
selector: &sourceASelector,
|
||||
wantFlag: model.Flag{Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "flag not found with flagSetId selector",
|
||||
key: "flagB",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlag: model.Flag{Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
store.Update(sourceA, sourceAFlags, nil)
|
||||
store.Update(sourceB, sourceBFlags, nil)
|
||||
store.Update(sourceC, sourceCFlags, nil)
|
||||
gotFlag, _, err := store.Get(context.Background(), tt.key, tt.selector)
|
||||
|
||||
if !tt.wantErr {
|
||||
require.Equal(t, tt.wantFlag, gotFlag)
|
||||
} else {
|
||||
require.Error(t, err, "expected an error for key %s with selector %v", tt.key, tt.selector)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAllNoWatcher(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
flagSetIdB := "flagSetIdA"
|
||||
flagSetIdC := "flagSetIdC"
|
||||
sources := []string{sourceA, sourceB, sourceC}
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + flagSetIdC)
|
||||
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantFlags map[string]model.Flag
|
||||
}{
|
||||
{
|
||||
name: "nil selector",
|
||||
selector: nil,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// "dupe" should be overwritten by higher priority flag
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Source: sourceB, FlagSetId: flagSetIdB, Priority: 1, Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "source selector",
|
||||
selector: &sourceASelector,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// we should get the "dupe" from sourceA
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on", Source: sourceA, FlagSetId: nilFlagSetId, Priority: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "flagSetId selector",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantFlags: map[string]model.Flag{
|
||||
// we should get the "dupe" from flagSetIdC
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Source: sourceC, FlagSetId: flagSetIdC, Priority: 2, Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "on"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdB}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
"dupe": {Key: "dupe", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": flagSetIdC}},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
store.Update(sourceA, sourceAFlags, nil)
|
||||
store.Update(sourceB, sourceBFlags, nil)
|
||||
store.Update(sourceC, sourceCFlags, nil)
|
||||
gotFlags, _, _ := store.GetAll(context.Background(), tt.selector)
|
||||
|
||||
require.Equal(t, len(tt.wantFlags), len(gotFlags))
|
||||
require.Equal(t, tt.wantFlags, gotFlags)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatch(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
sourceB := "sourceB"
|
||||
sourceC := "sourceC"
|
||||
myFlagSetId := "myFlagSet"
|
||||
var sources = []string{sourceA, sourceB, sourceC}
|
||||
pauseTime := 100 * time.Millisecond // time for updates to settle
|
||||
timeout := 1000 * time.Millisecond // time to make sure we get enough updates, and no extras
|
||||
|
||||
sourceASelector := NewSelector("source=" + sourceA)
|
||||
flagSetIdCSelector := NewSelector("flagSetId=" + myFlagSetId)
|
||||
emptySelector := NewSelector("")
|
||||
sourceCSelector := NewSelector("source=" + sourceC)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
selector *Selector
|
||||
wantUpdates int
|
||||
}{
|
||||
{
|
||||
name: "flag source selector (initial, plus 1 update)",
|
||||
selector: &sourceASelector,
|
||||
wantUpdates: 2,
|
||||
},
|
||||
{
|
||||
name: "flag set selector (initial, plus 3 updates)",
|
||||
selector: &flagSetIdCSelector,
|
||||
wantUpdates: 4,
|
||||
},
|
||||
{
|
||||
name: "no selector (all updates)",
|
||||
selector: &emptySelector,
|
||||
wantUpdates: 5,
|
||||
},
|
||||
{
|
||||
name: "flag source selector for unchanged source (initial, plus no updates)",
|
||||
selector: &sourceCSelector,
|
||||
wantUpdates: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
}
|
||||
sourceBFlags := map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "off", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}
|
||||
sourceCFlags := map[string]model.Flag{
|
||||
"flagC": {Key: "flagC", DefaultVariant: "off"},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
// setup initial flags
|
||||
store.Update(sourceA, sourceAFlags, model.Metadata{})
|
||||
store.Update(sourceB, sourceBFlags, model.Metadata{})
|
||||
store.Update(sourceC, sourceCFlags, model.Metadata{})
|
||||
watcher := make(chan FlagQueryResult, 1)
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
store.Watch(ctx, tt.selector, watcher)
|
||||
|
||||
// perform updates
|
||||
go func() {
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// changing a flag default variant should trigger an update
|
||||
store.Update(sourceA, map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "on"},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// changing a flag default variant should trigger an update
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// removing a flag set id should trigger an update (even for flag set id selectors; it should remove the flag from the set)
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on"},
|
||||
}, model.Metadata{})
|
||||
|
||||
time.Sleep(pauseTime)
|
||||
|
||||
// adding a flag set id should trigger an update
|
||||
store.Update(sourceB, map[string]model.Flag{
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on", Metadata: model.Metadata{"flagSetId": myFlagSetId}},
|
||||
}, model.Metadata{})
|
||||
}()
|
||||
|
||||
updates := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
assert.Equal(t, tt.wantUpdates, updates, "expected %d updates, got %d", tt.wantUpdates, updates)
|
||||
cancel()
|
||||
_, open := <-watcher
|
||||
assert.False(t, open, "watcher channel should be closed after cancel")
|
||||
return
|
||||
case q := <-watcher:
|
||||
if q.Flags != nil {
|
||||
updates++
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryMetadata(t *testing.T) {
|
||||
|
||||
sourceA := "sourceA"
|
||||
otherSource := "otherSource"
|
||||
nonExistingFlagSetId := "nonExistingFlagSetId"
|
||||
var sources = []string{sourceA}
|
||||
sourceAFlags := map[string]model.Flag{
|
||||
"flagA": {Key: "flagA", DefaultVariant: "off"},
|
||||
"flagB": {Key: "flagB", DefaultVariant: "on"},
|
||||
}
|
||||
|
||||
store, err := NewStore(logger.NewLogger(nil, false), sources)
|
||||
if err != nil {
|
||||
t.Fatalf("NewStore failed: %v", err)
|
||||
}
|
||||
|
||||
// setup initial flags
|
||||
store.Update(sourceA, sourceAFlags, model.Metadata{})
|
||||
|
||||
selector := NewSelector("source=" + otherSource + ",flagSetId=" + nonExistingFlagSetId)
|
||||
_, metadata, _ := store.GetAll(context.Background(), &selector)
|
||||
assert.Equal(t, metadata, model.Metadata{"source": otherSource, "flagSetId": nonExistingFlagSetId}, "metadata did not match expected")
|
||||
|
||||
selector = NewSelector("source=" + otherSource + ",flagSetId=" + nonExistingFlagSetId)
|
||||
_, metadata, _ = store.Get(context.Background(), "key", &selector)
|
||||
assert.Equal(t, metadata, model.Metadata{"source": otherSource, "flagSetId": nonExistingFlagSetId}, "metadata did not match expected")
|
||||
}
|
|
@ -1,20 +1,16 @@
|
|||
package blob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"gocloud.dev/blob"
|
||||
_ "gocloud.dev/blob/azureblob" // needed to initialize Azure Blob Storage driver
|
||||
_ "gocloud.dev/blob/gcsblob" // needed to initialize GCS driver
|
||||
_ "gocloud.dev/blob/s3blob" // needed to initialize s3 driver
|
||||
)
|
||||
|
||||
type Sync struct {
|
||||
|
@ -88,7 +84,7 @@ func (hs *Sync) sync(ctx context.Context, dataSync chan<- sync.DataSync, skipChe
|
|||
if err != nil {
|
||||
return fmt.Errorf("couldn't get object attributes: %v", err)
|
||||
}
|
||||
if hs.lastUpdated.Equal(updated) {
|
||||
if hs.lastUpdated == updated {
|
||||
hs.Logger.Debug("configuration hasn't changed, skipping fetching full object")
|
||||
return nil
|
||||
}
|
||||
|
@ -104,7 +100,7 @@ func (hs *Sync) sync(ctx context.Context, dataSync chan<- sync.DataSync, skipChe
|
|||
if !skipCheckingModTime {
|
||||
hs.lastUpdated = updated
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.Bucket + hs.Object}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.Bucket + hs.Object, Type: sync.ALL}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -128,20 +124,11 @@ func (hs *Sync) fetchObjectModificationTime(ctx context.Context, bucket *blob.Bu
|
|||
}
|
||||
|
||||
func (hs *Sync) fetchObject(ctx context.Context, bucket *blob.Bucket) (string, error) {
|
||||
r, err := bucket.NewReader(ctx, hs.Object, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening reader for object %s/%s: %w", hs.Bucket, hs.Object, err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
data, err := io.ReadAll(r)
|
||||
buf := bytes.NewBuffer(nil)
|
||||
err := bucket.Download(ctx, hs.Object, buf, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error downloading object %s/%s: %w", hs.Bucket, hs.Object, err)
|
||||
}
|
||||
|
||||
json, err := utils.ConvertToJSON(data, filepath.Ext(hs.Object), r.ContentType())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting blob data to json: %w", err)
|
||||
}
|
||||
return json, nil
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
|
|
@ -12,32 +12,13 @@ import (
|
|||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
func TestBlobSync(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
scheme string
|
||||
bucket string
|
||||
object string
|
||||
content string
|
||||
convertedContent string
|
||||
}{
|
||||
"json file type": {
|
||||
scheme: "xyz",
|
||||
bucket: "b",
|
||||
object: "flags.json",
|
||||
content: "{\"flags\":{}}",
|
||||
convertedContent: "{\"flags\":{}}",
|
||||
},
|
||||
"yaml file type": {
|
||||
scheme: "xyz",
|
||||
bucket: "b",
|
||||
object: "flags.yaml",
|
||||
content: "flags: []",
|
||||
convertedContent: "{\"flags\":[]}",
|
||||
},
|
||||
}
|
||||
const (
|
||||
scheme = "xyz"
|
||||
bucket = "b"
|
||||
object = "o"
|
||||
)
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
func TestSync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(spec string, cmd func()) error {
|
||||
|
@ -46,12 +27,12 @@ func TestBlobSync(t *testing.T) {
|
|||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
blobSync := &Sync{
|
||||
Bucket: tt.scheme + "://" + tt.bucket,
|
||||
Object: tt.object,
|
||||
Bucket: scheme + "://" + bucket,
|
||||
Object: object,
|
||||
Cron: mockCron,
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
blobMock := NewMockBlob(tt.scheme, func() *Sync {
|
||||
blobMock := NewMockBlob(scheme, func() *Sync {
|
||||
return blobSync
|
||||
})
|
||||
blobSync.BlobURLMux = blobMock.URLMux()
|
||||
|
@ -59,7 +40,8 @@ func TestBlobSync(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
dataSyncChan := make(chan sync.DataSync, 1)
|
||||
|
||||
blobMock.AddObject(tt.object, tt.content)
|
||||
config := "my-config"
|
||||
blobMock.AddObject(object, config)
|
||||
|
||||
go func() {
|
||||
err := blobSync.Sync(ctx, dataSyncChan)
|
||||
|
@ -70,19 +52,17 @@ func TestBlobSync(t *testing.T) {
|
|||
}()
|
||||
|
||||
data := <-dataSyncChan // initial sync
|
||||
if data.FlagData != tt.convertedContent {
|
||||
t.Errorf("expected content: %s, but received content: %s", tt.convertedContent, data.FlagData)
|
||||
if data.FlagData != config {
|
||||
t.Errorf("expected content: %s, but received content: %s", config, data.FlagData)
|
||||
}
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, tt.object, tt.convertedContent)
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, "new config")
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, tt.object, tt.convertedContent)
|
||||
tickWithConfigChange(t, mockCron, dataSyncChan, blobMock, "new config 2")
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
tickWithoutConfigChange(t, mockCron, dataSyncChan)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func tickWithConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataSyncChan chan sync.DataSync, blobMock *MockBlob, object string, newConfig string) {
|
||||
func tickWithConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataSyncChan chan sync.DataSync, blobMock *MockBlob, newConfig string) {
|
||||
time.Sleep(1 * time.Millisecond) // sleep so the new file has different modification date
|
||||
blobMock.AddObject(object, newConfig)
|
||||
mockCron.Tick()
|
||||
|
@ -93,7 +73,7 @@ func tickWithConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataSync
|
|||
t.Errorf("expected content: %s, but received content: %s", newConfig, data.FlagData)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("data channel unexpectedly closed")
|
||||
t.Errorf("data channel unexpecdly closed")
|
||||
}
|
||||
default:
|
||||
t.Errorf("data channel has no expected update")
|
||||
|
@ -107,18 +87,13 @@ func tickWithoutConfigChange(t *testing.T, mockCron *synctesting.MockCron, dataS
|
|||
if ok {
|
||||
t.Errorf("unexpected update: %s", data.FlagData)
|
||||
} else {
|
||||
t.Errorf("data channel unexpectedly closed")
|
||||
t.Errorf("data channel unexpecdly closed")
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func TestReSync(t *testing.T) {
|
||||
const (
|
||||
scheme = "xyz"
|
||||
bucket = "b"
|
||||
object = "flags.json"
|
||||
)
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
|
||||
|
|
|
@ -31,8 +31,6 @@ const (
|
|||
syncProviderKubernetes = "kubernetes"
|
||||
syncProviderHTTP = "http"
|
||||
syncProviderGcs = "gcs"
|
||||
syncProviderAzblob = "azblob"
|
||||
syncProviderS3 = "s3"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -40,11 +38,8 @@ var (
|
|||
regURL *regexp.Regexp
|
||||
regGRPC *regexp.Regexp
|
||||
regGRPCSecure *regexp.Regexp
|
||||
regGRPCCustomResolver *regexp.Regexp
|
||||
regFile *regexp.Regexp
|
||||
regGcs *regexp.Regexp
|
||||
regAzblob *regexp.Regexp
|
||||
regS3 *regexp.Regexp
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -52,11 +47,8 @@ func init() {
|
|||
regURL = regexp.MustCompile("^https?://")
|
||||
regGRPC = regexp.MustCompile("^" + grpc.Prefix)
|
||||
regGRPCSecure = regexp.MustCompile("^" + grpc.PrefixSecure)
|
||||
regGRPCCustomResolver = regexp.MustCompile("^" + grpc.SupportedScheme)
|
||||
regFile = regexp.MustCompile("^file:")
|
||||
regGcs = regexp.MustCompile("^gs://.+?/")
|
||||
regAzblob = regexp.MustCompile("^azblob://.+?/")
|
||||
regS3 = regexp.MustCompile("^s3://.+?/")
|
||||
}
|
||||
|
||||
type ISyncBuilder interface {
|
||||
|
@ -119,18 +111,11 @@ func (sb *SyncBuilder) syncFromConfig(sourceConfig sync.SourceConfig, logger *lo
|
|||
case syncProviderGcs:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with gcs driver for: %s", sourceConfig.URI))
|
||||
return sb.newGcs(sourceConfig, logger), nil
|
||||
case syncProviderAzblob:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with azblob driver for: %s", sourceConfig.URI))
|
||||
return sb.newAzblob(sourceConfig, logger)
|
||||
case syncProviderS3:
|
||||
logger.Debug(fmt.Sprintf("using blob sync-provider with s3 driver for: %s", sourceConfig.URI))
|
||||
return sb.newS3(sourceConfig, logger), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid sync provider: %s, must be one of with "+
|
||||
"'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s' or '%s'",
|
||||
return nil, fmt.Errorf("invalid sync provider: %s, must be one of with '%s', '%s', '%s', %s', '%s' or '%s'",
|
||||
sourceConfig.Provider, syncProviderFile, syncProviderFsNotify, syncProviderFileInfo,
|
||||
syncProviderKubernetes, syncProviderHTTP, syncProviderGrpc, syncProviderGcs, syncProviderAzblob, syncProviderS3)
|
||||
syncProviderKubernetes, syncProviderHTTP, syncProviderKubernetes)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -253,71 +238,6 @@ func (sb *SyncBuilder) newGcs(config sync.SourceConfig, logger *logger.Logger) *
|
|||
}
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newAzblob(config sync.SourceConfig, logger *logger.Logger) (*blobSync.Sync, error) {
|
||||
// Required to generate the azblob service URL
|
||||
storageAccountName := os.Getenv("AZURE_STORAGE_ACCOUNT")
|
||||
if storageAccountName == "" {
|
||||
return nil, fmt.Errorf("environment variable AZURE_STORAGE_ACCOUNT not set or is blank")
|
||||
}
|
||||
if regexp.MustCompile(`\s`).MatchString(storageAccountName) {
|
||||
return nil, fmt.Errorf("environment variable AZURE_STORAGE_ACCOUNT contains whitespace")
|
||||
}
|
||||
|
||||
// Extract bucket uri and object name from the full URI:
|
||||
// azblob://bucket/path/to/object results in azblob://bucket/ as bucketUri and
|
||||
// path/to/object as an object name.
|
||||
bucketURI := regAzblob.FindString(config.URI)
|
||||
objectName := regAzblob.ReplaceAllString(config.URI, "")
|
||||
|
||||
// Defaults to 5 seconds if interval is not set.
|
||||
var interval uint32 = 5
|
||||
if config.Interval != 0 {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
return &blobSync.Sync{
|
||||
Bucket: bucketURI,
|
||||
Object: objectName,
|
||||
|
||||
BlobURLMux: blob.DefaultURLMux(),
|
||||
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "azblob"),
|
||||
),
|
||||
Interval: interval,
|
||||
Cron: cron.New(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sb *SyncBuilder) newS3(config sync.SourceConfig, logger *logger.Logger) *blobSync.Sync {
|
||||
// Extract bucket uri and object name from the full URI:
|
||||
// gs://bucket/path/to/object results in gs://bucket/ as bucketUri and
|
||||
// path/to/object as an object name.
|
||||
bucketURI := regS3.FindString(config.URI)
|
||||
objectName := regS3.ReplaceAllString(config.URI, "")
|
||||
|
||||
// Defaults to 5 seconds if interval is not set.
|
||||
var interval uint32 = 5
|
||||
if config.Interval != 0 {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
return &blobSync.Sync{
|
||||
Bucket: bucketURI,
|
||||
Object: objectName,
|
||||
|
||||
BlobURLMux: blob.DefaultURLMux(),
|
||||
|
||||
Logger: logger.WithFields(
|
||||
zap.String("component", "sync"),
|
||||
zap.String("sync", "s3"),
|
||||
),
|
||||
Interval: interval,
|
||||
Cron: cron.New(),
|
||||
}
|
||||
}
|
||||
|
||||
type IK8sClientBuilder interface {
|
||||
GetK8sClient() (dynamic.Interface, error)
|
||||
}
|
||||
|
|
|
@ -197,8 +197,6 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
{
|
||||
name: "combined",
|
||||
injectFunc: func(builder *SyncBuilder) {
|
||||
t.Setenv("AZURE_STORAGE_ACCOUNT", "myaccount")
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
|
||||
mockClientBuilder := buildermock.NewMockIK8sClientBuilder(ctrl)
|
||||
|
@ -238,14 +236,6 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
URI: "gs://bucket/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSyncs: []sync.ISync{
|
||||
|
@ -255,8 +245,6 @@ func Test_SyncsFromFromConfig(t *testing.T) {
|
|||
&file.Sync{},
|
||||
&kubernetes.Sync{},
|
||||
&blob.Sync{},
|
||||
&blob.Sync{},
|
||||
&blob.Sync{},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
|
@ -336,144 +324,3 @@ func Test_GcsConfig(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_AzblobConfig(t *testing.T) {
|
||||
lg := logger.NewLogger(nil, false)
|
||||
defaultInterval := uint32(5)
|
||||
tests := []struct {
|
||||
name string
|
||||
uri string
|
||||
interval uint32
|
||||
storageAccount string
|
||||
expectedBucket string
|
||||
expectedObject string
|
||||
expectedInterval uint32
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "simple path",
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
interval: 10,
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: 10,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "default interval",
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no object set", // Blob syncer will return error when fetching
|
||||
uri: "azblob://bucket/",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "azblob://bucket/",
|
||||
expectedObject: "",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "malformed uri", // Blob syncer will return error when opening bucket
|
||||
uri: "malformed",
|
||||
storageAccount: "myaccount",
|
||||
expectedBucket: "",
|
||||
expectedObject: "malformed",
|
||||
expectedInterval: defaultInterval,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "storage account not set", // Sync builder will fail and return error
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "storage account contains whitespace", // Sync builder will fail and return error
|
||||
uri: "azblob://bucket/path/to/object",
|
||||
storageAccount: "my account",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv("AZURE_STORAGE_ACCOUNT", tt.storageAccount)
|
||||
azblobSync, err := NewSyncBuilder().newAzblob(sync.SourceConfig{
|
||||
URI: tt.uri,
|
||||
Interval: tt.interval,
|
||||
}, lg)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("newAzblob() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if (err != nil) && (tt.wantErr == true) {
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectedBucket, azblobSync.Bucket)
|
||||
require.Equal(t, tt.expectedObject, azblobSync.Object)
|
||||
require.Equal(t, int(tt.expectedInterval), int(azblobSync.Interval))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_S3Config(t *testing.T) {
|
||||
lg := logger.NewLogger(nil, false)
|
||||
defaultInterval := uint32(5)
|
||||
tests := []struct {
|
||||
name string
|
||||
uri string
|
||||
interval uint32
|
||||
expectedBucket string
|
||||
expectedObject string
|
||||
expectedInterval uint32
|
||||
}{
|
||||
{
|
||||
name: "simple path",
|
||||
uri: "s3://bucket/path/to/object",
|
||||
interval: 10,
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: 10,
|
||||
},
|
||||
{
|
||||
name: "default interval",
|
||||
uri: "s3://bucket/path/to/object",
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "path/to/object",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "no object set", // Blob syncer will return error when fetching
|
||||
uri: "s3://bucket/",
|
||||
expectedBucket: "s3://bucket/",
|
||||
expectedObject: "",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
{
|
||||
name: "malformed uri", // Blob syncer will return error when opening bucket
|
||||
uri: "malformed",
|
||||
expectedBucket: "",
|
||||
expectedObject: "malformed",
|
||||
expectedInterval: defaultInterval,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s3Sync := NewSyncBuilder().newS3(sync.SourceConfig{
|
||||
URI: tt.uri,
|
||||
Interval: tt.interval,
|
||||
}, lg)
|
||||
require.Equal(t, tt.expectedBucket, s3Sync.Bucket)
|
||||
require.Equal(t, tt.expectedObject, s3Sync.Object)
|
||||
require.Equal(t, int(tt.expectedInterval), int(s3Sync.Interval))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -64,29 +64,14 @@ func ParseSyncProviderURIs(uris []string) ([]sync.SourceConfig, error) {
|
|||
Provider: syncProviderGrpc,
|
||||
TLS: true,
|
||||
})
|
||||
case regGRPCCustomResolver.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderGrpc,
|
||||
})
|
||||
case regGcs.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderGcs,
|
||||
})
|
||||
case regAzblob.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderAzblob,
|
||||
})
|
||||
case regS3.Match(uriB):
|
||||
syncProvidersParsed = append(syncProvidersParsed, sync.SourceConfig{
|
||||
URI: uri,
|
||||
Provider: syncProviderS3,
|
||||
})
|
||||
default:
|
||||
return syncProvidersParsed, fmt.Errorf("invalid sync uri argument: %s, must start with 'file:', "+
|
||||
"'http(s)://', 'grpc(s)://', 'gs://', 'azblob://' or 'core.openfeature.dev'", uri)
|
||||
"'http(s)://', 'grpc(s)://', 'gs://' or 'core.openfeature.dev'", uri)
|
||||
}
|
||||
}
|
||||
return syncProvidersParsed, nil
|
||||
|
|
|
@ -29,9 +29,7 @@ func TestParseSource(t *testing.T) {
|
|||
{"uri":"http://test.com","provider":"http","bearerToken":":)"},
|
||||
{"uri":"host:port","provider":"grpc"},
|
||||
{"uri":"default/my-crd","provider":"kubernetes"},
|
||||
{"uri":"gs://bucket-name/path/to/file","provider":"gcs"},
|
||||
{"uri":"azblob://bucket-name/path/to/file","provider":"azblob"},
|
||||
{"uri":"s3://bucket-name/path/to/file","provider":"s3"}
|
||||
{"uri":"gs://bucket-name/path/to/file","provider":"gcs"}
|
||||
]`,
|
||||
expectErr: false,
|
||||
out: []sync.SourceConfig{
|
||||
|
@ -56,14 +54,6 @@ func TestParseSource(t *testing.T) {
|
|||
URI: "gs://bucket-name/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket-name/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket-name/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
"multiple-syncs-with-options": {
|
||||
|
@ -198,8 +188,6 @@ func TestParseSyncProviderURIs(t *testing.T) {
|
|||
"grpcs://secure-grpc",
|
||||
"core.openfeature.dev/default/my-crd",
|
||||
"gs://bucket-name/path/to/file",
|
||||
"azblob://bucket-name/path/to/file",
|
||||
"s3://bucket-name/path/to/file",
|
||||
},
|
||||
expectErr: false,
|
||||
out: []sync.SourceConfig{
|
||||
|
@ -229,14 +217,6 @@ func TestParseSyncProviderURIs(t *testing.T) {
|
|||
URI: "gs://bucket-name/path/to/file",
|
||||
Provider: syncProviderGcs,
|
||||
},
|
||||
{
|
||||
URI: "azblob://bucket-name/path/to/file",
|
||||
Provider: syncProviderAzblob,
|
||||
},
|
||||
{
|
||||
URI: "s3://bucket-name/path/to/file",
|
||||
Provider: syncProviderS3,
|
||||
},
|
||||
},
|
||||
},
|
||||
"empty": {
|
||||
|
|
|
@ -2,17 +2,17 @@ package file
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
msync "sync"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -32,6 +32,8 @@ type Watcher interface {
|
|||
type Sync struct {
|
||||
URI string
|
||||
Logger *logger.Logger
|
||||
// FileType indicates the file type e.g., json, yaml/yml etc.,
|
||||
fileType string
|
||||
// watchType indicates how to watch the file FSNOTIFY|FILEINFO
|
||||
watchType string
|
||||
watcher Watcher
|
||||
|
@ -52,7 +54,7 @@ func NewFileSync(uri string, watchType string, logger *logger.Logger) *Sync {
|
|||
const defaultState = "{}"
|
||||
|
||||
func (fs *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -94,7 +96,7 @@ func (fs *Sync) setReady(val bool) {
|
|||
//nolint:funlen
|
||||
func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
defer fs.watcher.Close()
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
fs.setReady(true)
|
||||
fs.Logger.Info(fmt.Sprintf("watching filepath: %s", fs.URI))
|
||||
for {
|
||||
|
@ -108,7 +110,7 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
fs.Logger.Info(fmt.Sprintf("filepath event: %s %s", event.Name, event.Op.String()))
|
||||
switch {
|
||||
case event.Has(fsnotify.Create) || event.Has(fsnotify.Write):
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
case event.Has(fsnotify.Remove):
|
||||
// K8s exposes config maps as symlinks.
|
||||
// Updates cause a remove event, we need to re-add the watcher in this case.
|
||||
|
@ -116,20 +118,20 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
if err != nil {
|
||||
// the watcher could not be re-added, so the file must have been deleted
|
||||
fs.Logger.Error(fmt.Sprintf("error restoring watcher, file may have been deleted: %s", err.Error()))
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.DELETE, dataSync)
|
||||
continue
|
||||
}
|
||||
|
||||
// Counterintuitively, remove events are the only meaningful ones seen in K8s.
|
||||
// K8s handles mounted ConfigMap updates by modifying symbolic links, which is an atomic operation.
|
||||
// At the point the remove event is fired, we have our new data, so we can send it down the channel.
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.ALL, dataSync)
|
||||
case event.Has(fsnotify.Chmod):
|
||||
// on linux the REMOVE event will not fire until all file descriptors are closed, this cannot happen
|
||||
// while the file is being watched, os.Stat is used here to infer deletion
|
||||
if _, err := os.Stat(fs.URI); errors.Is(err, os.ErrNotExist) {
|
||||
fs.Logger.Error(fmt.Sprintf("file has been deleted: %s", err.Error()))
|
||||
fs.sendDataSync(ctx, dataSync)
|
||||
fs.sendDataSync(ctx, sync.DELETE, dataSync)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -147,8 +149,14 @@ func (fs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (fs *Sync) sendDataSync(ctx context.Context, dataSync chan<- sync.DataSync) {
|
||||
fs.Logger.Debug(fmt.Sprintf("Data sync received for %s", fs.URI))
|
||||
func (fs *Sync) sendDataSync(ctx context.Context, syncType sync.Type, dataSync chan<- sync.DataSync) {
|
||||
fs.Logger.Debug(fmt.Sprintf("Configuration %s: %s", fs.URI, syncType.String()))
|
||||
|
||||
if syncType == sync.DELETE {
|
||||
// Skip fetching and emit default state to avoid EOF errors
|
||||
dataSync <- sync.DataSync{FlagData: defaultState, Source: fs.URI, Type: syncType}
|
||||
return
|
||||
}
|
||||
|
||||
msg := defaultState
|
||||
m, err := fs.fetch(ctx)
|
||||
|
@ -161,29 +169,49 @@ func (fs *Sync) sendDataSync(ctx context.Context, dataSync chan<- sync.DataSync)
|
|||
msg = m
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: fs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: fs.URI, Type: syncType}
|
||||
}
|
||||
|
||||
func (fs *Sync) fetch(_ context.Context) (string, error) {
|
||||
if fs.URI == "" {
|
||||
return "", errors.New("no filepath string set")
|
||||
}
|
||||
|
||||
file, err := os.Open(fs.URI)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file %s: %w", fs.URI, err)
|
||||
if fs.fileType == "" {
|
||||
uriSplit := strings.Split(fs.URI, ".")
|
||||
fs.fileType = uriSplit[len(uriSplit)-1]
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
data, err := io.ReadAll(file)
|
||||
rawFile, err := os.ReadFile(fs.URI)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading file %s: %w", fs.URI, err)
|
||||
}
|
||||
|
||||
// File extension is used to determine the content type, so media type is unnecessary
|
||||
json, err := utils.ConvertToJSON(data, filepath.Ext(fs.URI), "")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting file content to json: %w", err)
|
||||
switch fs.fileType {
|
||||
case "yaml", "yml":
|
||||
return yamlToJSON(rawFile)
|
||||
case "json":
|
||||
return string(rawFile), nil
|
||||
default:
|
||||
return "", fmt.Errorf("filepath extension for URI: '%s' is not supported", fs.URI)
|
||||
}
|
||||
return json, nil
|
||||
}
|
||||
|
||||
// yamlToJSON is a generic helper function to convert
|
||||
// yaml to json
|
||||
func yamlToJSON(rawFile []byte) (string, error) {
|
||||
if len(rawFile) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var ms map[string]interface{}
|
||||
// yaml.Unmarshal unmarshals to map[interface]interface{}
|
||||
if err := yaml.Unmarshal(rawFile, &ms); err != nil {
|
||||
return "", fmt.Errorf("unmarshal yaml: %w", err)
|
||||
}
|
||||
|
||||
r, err := json.Marshal(ms)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("convert yaml to json: %w", err)
|
||||
}
|
||||
|
||||
return string(r), err
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ func TestSimpleReSync(t *testing.T) {
|
|||
expectedDataSync := sync.DataSync{
|
||||
FlagData: "hello",
|
||||
Source: source,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
handler := Sync{
|
||||
URI: source,
|
||||
|
@ -75,6 +76,7 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", readDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -92,10 +94,12 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", updateDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: "new content",
|
||||
Source: fmt.Sprintf("%s/%s", updateDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -113,10 +117,12 @@ func TestSimpleSync(t *testing.T) {
|
|||
{
|
||||
FlagData: fetchFileContents,
|
||||
Source: fmt.Sprintf("%s/%s", deleteDirName, fetchFileName),
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: defaultState,
|
||||
Source: fmt.Sprintf("%s/%s", deleteDirName, fetchFileName),
|
||||
Type: sync.DELETE,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -166,6 +172,9 @@ func TestSimpleSync(t *testing.T) {
|
|||
if data.Source != syncEvent.Source {
|
||||
t.Errorf("expected source: %s, but received source: %s", syncEvent.Source, data.Source)
|
||||
}
|
||||
if data.Type != syncEvent.Type {
|
||||
t.Errorf("expected type: %b, but received type: %b", syncEvent.Type, data.Type)
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Errorf("event not found, timeout out after 10 seconds")
|
||||
}
|
||||
|
@ -181,7 +190,7 @@ func TestSimpleSync(t *testing.T) {
|
|||
|
||||
func TestFilePathSync_Fetch(t *testing.T) {
|
||||
successDirName := t.TempDir()
|
||||
failureDirName := t.TempDir()
|
||||
falureDirName := t.TempDir()
|
||||
tests := map[string]struct {
|
||||
fpSync Sync
|
||||
handleResponse func(t *testing.T, fetched string, err error)
|
||||
|
@ -204,9 +213,9 @@ func TestFilePathSync_Fetch(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"not found": {
|
||||
fetchDirName: failureDirName,
|
||||
fetchDirName: falureDirName,
|
||||
fpSync: Sync{
|
||||
URI: fmt.Sprintf("%s/%s", failureDirName, "not_found"),
|
||||
URI: fmt.Sprintf("%s/%s", falureDirName, "not_found"),
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
},
|
||||
handleResponse: func(t *testing.T, fetched string, err error) {
|
||||
|
@ -300,3 +309,31 @@ func writeToFile(t *testing.T, fetchDirName, fileContents string) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilePathSync_yamlToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input []byte
|
||||
handleResponse func(t *testing.T, output string, err error)
|
||||
}{
|
||||
"empty": {
|
||||
input: []byte(""),
|
||||
handleResponse: func(t *testing.T, output string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("expect no err, got err = %v", err)
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
t.Fatalf("expect output = '', got output = '%v'", output)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
output, err := yamlToJSON(tt.input)
|
||||
|
||||
tt.handleResponse(t, output, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,9 +12,7 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
grpccredential "github.com/open-feature/flagd/core/pkg/sync/grpc/credentials"
|
||||
_ "github.com/open-feature/flagd/core/pkg/sync/grpc/nameresolvers" // initialize custom resolvers e.g. envoy.Init()
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -22,7 +20,6 @@ const (
|
|||
// URLs for REST APIs (i.e - HTTP) from GRPC endpoints.
|
||||
Prefix = "grpc://"
|
||||
PrefixSecure = "grpcs://"
|
||||
SupportedScheme = "(envoy|dns|uds|xds)"
|
||||
|
||||
// Connection retry constants
|
||||
// Back off period is calculated with backOffBase ^ #retry-iteration. However, when #retry-iteration count reach
|
||||
|
@ -44,7 +41,6 @@ type FlagSyncServiceClientResponse interface {
|
|||
var once msync.Once
|
||||
|
||||
type Sync struct {
|
||||
GrpcDialOptionsOverride []grpc.DialOption
|
||||
CertPath string
|
||||
CredentialBuilder grpccredential.Builder
|
||||
Logger *logger.Logger
|
||||
|
@ -59,22 +55,17 @@ type Sync struct {
|
|||
}
|
||||
|
||||
func (g *Sync) Init(_ context.Context) error {
|
||||
var rpcCon *grpc.ClientConn // Reusable client connection
|
||||
var err error
|
||||
|
||||
if len(g.GrpcDialOptionsOverride) > 0 {
|
||||
g.Logger.Debug("GRPC DialOptions override provided")
|
||||
rpcCon, err = grpc.NewClient(g.URI, g.GrpcDialOptionsOverride...)
|
||||
} else {
|
||||
var tCredentials credentials.TransportCredentials
|
||||
tCredentials, err = g.CredentialBuilder.Build(g.Secure, g.CertPath)
|
||||
tCredentials, err := g.CredentialBuilder.Build(g.Secure, g.CertPath)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error building transport credentials: %w", err)
|
||||
err := fmt.Errorf("error building transport credentials: %w", err)
|
||||
g.Logger.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// Derive reusable client connection
|
||||
// Set MaxMsgSize if passed
|
||||
var rpcCon *grpc.ClientConn
|
||||
|
||||
if g.MaxMsgSize > 0 {
|
||||
g.Logger.Info(fmt.Sprintf("setting max receive message size %d bytes default 4MB", g.MaxMsgSize))
|
||||
dialOptions := grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(g.MaxMsgSize))
|
||||
|
@ -82,7 +73,6 @@ func (g *Sync) Init(_ context.Context) error {
|
|||
} else {
|
||||
rpcCon, err = grpc.NewClient(g.URI, grpc.WithTransportCredentials(tCredentials))
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("error initiating grpc client connection: %w", err)
|
||||
|
@ -106,6 +96,7 @@ func (g *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error
|
|||
dataSync <- sync.DataSync{
|
||||
FlagData: res.GetFlagConfiguration(),
|
||||
Source: g.URI,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -200,9 +191,9 @@ func (g *Sync) handleFlagSync(stream syncv1grpc.FlagSyncService_SyncFlagsClient,
|
|||
|
||||
dataSync <- sync.DataSync{
|
||||
FlagData: data.FlagConfiguration,
|
||||
SyncContext: data.SyncContext,
|
||||
Source: g.URI,
|
||||
Selector: g.Selector,
|
||||
Type: sync.ALL,
|
||||
}
|
||||
|
||||
g.Logger.Debug("received full configuration payload")
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
credendialsmock "github.com/open-feature/flagd/core/pkg/sync/grpc/credentials/mock"
|
||||
grpcmock "github.com/open-feature/flagd/core/pkg/sync/grpc/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"go.uber.org/zap"
|
||||
|
@ -25,8 +26,6 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
func Test_InitWithMockCredentialBuilder(t *testing.T) {
|
||||
|
@ -123,6 +122,7 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
notifications: []sync.DataSync{
|
||||
{
|
||||
FlagData: "success",
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
shouldError: false,
|
||||
|
@ -179,6 +179,9 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
|
||||
for _, expected := range test.notifications {
|
||||
out := <-syncChan
|
||||
if expected.Type != out.Type {
|
||||
t.Errorf("Returned sync type = %v, wanted %v", out.Type, expected.Type)
|
||||
}
|
||||
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
|
@ -192,14 +195,98 @@ func Test_ReSyncTests(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSync_BasicFlagSyncStates(t *testing.T) {
|
||||
grpcSyncImpl := Sync{
|
||||
URI: "grpc://test",
|
||||
ProviderID: "",
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
|
||||
mockError := errors.New("could not sync")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
stream syncv1grpc.FlagSyncService_SyncFlagsClient
|
||||
setup func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse)
|
||||
want sync.Type
|
||||
wantError error
|
||||
ready bool
|
||||
}{
|
||||
{
|
||||
name: "State All maps to Sync All",
|
||||
setup: func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse) {
|
||||
client.EXPECT().SyncFlags(gomock.Any(), gomock.Any(), gomock.Any()).Return(clientResponse, nil)
|
||||
gomock.InOrder(
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
&v1.SyncFlagsResponse{
|
||||
FlagConfiguration: "{}",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
nil, io.EOF,
|
||||
),
|
||||
)
|
||||
},
|
||||
want: sync.ALL,
|
||||
ready: true,
|
||||
},
|
||||
{
|
||||
name: "Error during flag sync",
|
||||
setup: func(t *testing.T, client *grpcmock.MockFlagSyncServiceClient, clientResponse *grpcmock.MockFlagSyncServiceClientResponse) {
|
||||
client.EXPECT().SyncFlags(gomock.Any(), gomock.Any(), gomock.Any()).Return(clientResponse, nil)
|
||||
clientResponse.EXPECT().Recv().Return(
|
||||
nil,
|
||||
mockError,
|
||||
)
|
||||
},
|
||||
ready: true,
|
||||
want: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
syncChan := make(chan sync.DataSync, 1)
|
||||
|
||||
mockClient := grpcmock.NewMockFlagSyncServiceClient(ctrl)
|
||||
mockClientResponse := grpcmock.NewMockFlagSyncServiceClientResponse(ctrl)
|
||||
test.setup(t, mockClient, mockClientResponse)
|
||||
|
||||
waitChan := make(chan struct{})
|
||||
go func() {
|
||||
grpcSyncImpl.client = mockClient
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
err := grpcSyncImpl.Sync(ctx, syncChan)
|
||||
if err != nil {
|
||||
t.Errorf("Error handling flag sync: %v", err)
|
||||
}
|
||||
close(waitChan)
|
||||
}()
|
||||
<-waitChan
|
||||
|
||||
if test.want < 0 {
|
||||
require.Empty(t, syncChan)
|
||||
return
|
||||
}
|
||||
data := <-syncChan
|
||||
|
||||
if grpcSyncImpl.IsReady() != test.ready {
|
||||
t.Errorf("expected grpcSyncImpl.ready to be: '%v', got: '%v'", test.ready, grpcSyncImpl.ready)
|
||||
}
|
||||
|
||||
if data.Type != test.want {
|
||||
t.Errorf("Returned data sync state = %v, wanted %v", data.Type, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_StreamListener(t *testing.T) {
|
||||
const target = "localBufCon"
|
||||
|
||||
metadata, err := structpb.NewStruct(map[string]any{"sources": "A,B,C"})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create sync context: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input []serverPayload
|
||||
|
@ -215,7 +302,7 @@ func Test_StreamListener(t *testing.T) {
|
|||
output: []sync.DataSync{
|
||||
{
|
||||
FlagData: "{\"flags\": {}}",
|
||||
SyncContext: metadata,
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -232,11 +319,11 @@ func Test_StreamListener(t *testing.T) {
|
|||
output: []sync.DataSync{
|
||||
{
|
||||
FlagData: "{}",
|
||||
SyncContext: metadata,
|
||||
Type: sync.ALL,
|
||||
},
|
||||
{
|
||||
FlagData: "{\"flags\": {}}",
|
||||
SyncContext: metadata,
|
||||
Type: sync.ALL,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -289,12 +376,12 @@ func Test_StreamListener(t *testing.T) {
|
|||
for _, expected := range test.output {
|
||||
out := <-syncChan
|
||||
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
if expected.Type != out.Type {
|
||||
t.Errorf("Returned sync type = %v, wanted %v", out.Type, expected.Type)
|
||||
}
|
||||
|
||||
if !proto.Equal(expected.SyncContext, out.SyncContext) {
|
||||
t.Errorf("Returned sync context = %v, wanted = %v", out.SyncContext, expected.SyncContext)
|
||||
if expected.FlagData != out.FlagData {
|
||||
t.Errorf("Returned sync data = %v, wanted %v", out.FlagData, expected.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -379,7 +466,8 @@ func Test_SyncRetry(t *testing.T) {
|
|||
// Setup
|
||||
target := "grpc://local"
|
||||
bufListener := bufconn.Listen(1)
|
||||
emptyFlagData := "{}"
|
||||
|
||||
expectType := sync.ALL
|
||||
|
||||
// buffer based server. response ignored purposefully
|
||||
bServer := bufferedServer{listener: bufListener, mockResponses: []serverPayload{
|
||||
|
@ -433,7 +521,7 @@ func Test_SyncRetry(t *testing.T) {
|
|||
t.Errorf("timeout waiting for conditions to fulfil")
|
||||
break
|
||||
case data := <-syncChan:
|
||||
if data.FlagData != emptyFlagData {
|
||||
if data.Type != expectType {
|
||||
t.Errorf("sync start error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
@ -453,9 +541,9 @@ func Test_SyncRetry(t *testing.T) {
|
|||
case <-tCtx.Done():
|
||||
cancelFunc()
|
||||
t.Error("timeout waiting for conditions to fulfil")
|
||||
case data := <-syncChan:
|
||||
if data.FlagData != emptyFlagData {
|
||||
t.Errorf("sync start error: %s", err.Error())
|
||||
case rsp := <-syncChan:
|
||||
if rsp.Type != expectType {
|
||||
t.Errorf("expected response: %s, but got: %s", expectType, rsp.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -487,10 +575,8 @@ type bufferedServer struct {
|
|||
|
||||
func (b *bufferedServer) SyncFlags(_ *v1.SyncFlagsRequest, stream syncv1grpc.FlagSyncService_SyncFlagsServer) error {
|
||||
for _, response := range b.mockResponses {
|
||||
metadata, _ := structpb.NewStruct(map[string]any{"sources": "A,B,C"})
|
||||
err := stream.Send(&v1.SyncFlagsResponse{
|
||||
FlagConfiguration: response.flags,
|
||||
SyncContext: metadata,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Error with stream: %s", err.Error())
|
||||
|
|
|
@ -1,84 +0,0 @@
|
|||
package nameresolvers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const scheme = "envoy"
|
||||
|
||||
type envoyBuilder struct{}
|
||||
|
||||
// Build A custom NameResolver to resolve gRPC target uri for envoy in the
|
||||
// format of.
|
||||
//
|
||||
// Custom URI Scheme:
|
||||
//
|
||||
// envoy://[proxy-agent-host]:[proxy-agent-port]/[service-name]
|
||||
func (*envoyBuilder) Build(target resolver.Target,
|
||||
cc resolver.ClientConn, _ resolver.BuildOptions,
|
||||
) (resolver.Resolver, error) {
|
||||
_, err := isValidTarget(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &envoyResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
}
|
||||
r.start()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (*envoyBuilder) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
type envoyResolver struct {
|
||||
target resolver.Target
|
||||
cc resolver.ClientConn
|
||||
}
|
||||
|
||||
// Envoy NameResolver, will always override the authority with the specified authority i.e. URL.path and
|
||||
// use the socketAddress i.e. Host:Port to connect.
|
||||
func (r *envoyResolver) start() {
|
||||
addr := fmt.Sprintf("%s:%s", r.target.URL.Hostname(), r.target.URL.Port())
|
||||
err := r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (*envoyResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
func (*envoyResolver) Close() {}
|
||||
|
||||
// Validate user specified target
|
||||
//
|
||||
// Sample target string: envoy://localhost:9211/test.service
|
||||
//
|
||||
// return `true` if the target string used match the scheme and format
|
||||
func isValidTarget(target resolver.Target) (bool, error) {
|
||||
// make sure and host and port not empty
|
||||
// used as resolver.Address
|
||||
if target.URL.Scheme != "envoy" || target.URL.Hostname() == "" || target.URL.Port() == "" {
|
||||
return false, fmt.Errorf("envoy-resolver: invalid scheme or missing host/port, target: %s",
|
||||
target)
|
||||
}
|
||||
|
||||
// make sure the path is valid
|
||||
// used as :authority e.g. test.service
|
||||
path := target.Endpoint()
|
||||
if path == "" || strings.Contains(path, "/") {
|
||||
return false, fmt.Errorf("envoy-resolver: invalid path %s", path)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
resolver.Register(&envoyBuilder{})
|
||||
}
|
|
@ -1,103 +0,0 @@
|
|||
package nameresolvers
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
func Test_EnvoyTargetString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mockURL url.URL
|
||||
mockError string
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "Should be valid string",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "",
|
||||
shouldError: false,
|
||||
},
|
||||
{
|
||||
name: "Should be valid scheme",
|
||||
mockURL: url.URL{
|
||||
Scheme: "invalid",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: invalid://localhost:8080/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Should be valid path",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service/test",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid path test.service/test",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Should be valid path",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost:8080",
|
||||
Path: "/test.service/",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid path test.service/",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Hostname should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: ":8080",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy://:8080/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Port should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Host: "localhost",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy://localhost/test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "Hostname and Port should not be empty",
|
||||
mockURL: url.URL{
|
||||
Scheme: "envoy",
|
||||
Path: "/test.service",
|
||||
},
|
||||
mockError: "envoy-resolver: invalid scheme or missing host/port, target: envoy:///test.service",
|
||||
shouldError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
target := resolver.Target{URL: test.mockURL}
|
||||
|
||||
isValid, err := isValidTarget(target)
|
||||
|
||||
if test.shouldError {
|
||||
require.False(t, isValid, "Should not be valid")
|
||||
require.NotNilf(t, err, "Error should not be nil")
|
||||
require.Containsf(t, err.Error(), test.mockError, "Error should contains %s", test.mockError)
|
||||
} else {
|
||||
require.True(t, isValid, "Should be valid")
|
||||
require.NoErrorf(t, err, "Error should be nil")
|
||||
}
|
||||
}
|
||||
}
|
|
@ -8,12 +8,9 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
parseUrl "net/url"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/open-feature/flagd/core/pkg/logger"
|
||||
"github.com/open-feature/flagd/core/pkg/sync"
|
||||
"github.com/open-feature/flagd/core/pkg/utils"
|
||||
"golang.org/x/crypto/sha3" //nolint:gosec
|
||||
)
|
||||
|
||||
|
@ -27,7 +24,6 @@ type Sync struct {
|
|||
AuthHeader string
|
||||
Interval uint32
|
||||
ready bool
|
||||
eTag string
|
||||
}
|
||||
|
||||
// Client defines the behaviour required of a http client
|
||||
|
@ -43,11 +39,11 @@ type Cron interface {
|
|||
}
|
||||
|
||||
func (hs *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
msg, _, err := hs.fetchBody(ctx, true)
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -64,7 +60,7 @@ func (hs *Sync) IsReady() bool {
|
|||
|
||||
func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
||||
// Initial fetch
|
||||
fetch, _, err := hs.fetchBody(ctx, true)
|
||||
fetch, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -75,30 +71,43 @@ func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
hs.Logger.Debug(fmt.Sprintf("polling %s every %d seconds", hs.URI, hs.Interval))
|
||||
_ = hs.Cron.AddFunc(fmt.Sprintf("*/%d * * * *", hs.Interval), func() {
|
||||
hs.Logger.Debug(fmt.Sprintf("fetching configuration from %s", hs.URI))
|
||||
previousBodySHA := hs.LastBodySHA
|
||||
body, noChange, err := hs.fetchBody(ctx, false)
|
||||
body, err := hs.fetchBodyFromURL(ctx, hs.URI)
|
||||
if err != nil {
|
||||
hs.Logger.Error(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if len(body) == 0 {
|
||||
hs.Logger.Debug("configuration deleted")
|
||||
} else {
|
||||
if hs.LastBodySHA == "" {
|
||||
hs.Logger.Debug("new configuration created")
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error fetching: %s", err.Error()))
|
||||
return
|
||||
} else {
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
}
|
||||
} else {
|
||||
currentSHA := hs.generateSha(body)
|
||||
if hs.LastBodySHA != currentSHA {
|
||||
hs.Logger.Debug("configuration modified")
|
||||
msg, err := hs.Fetch(ctx)
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error fetching: %s", err.Error()))
|
||||
} else {
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: hs.URI, Type: sync.ALL}
|
||||
}
|
||||
}
|
||||
|
||||
if body == "" && !noChange {
|
||||
hs.Logger.Debug("configuration deleted")
|
||||
return
|
||||
hs.LastBodySHA = currentSHA
|
||||
}
|
||||
|
||||
if previousBodySHA == "" {
|
||||
hs.Logger.Debug("configuration created")
|
||||
dataSync <- sync.DataSync{FlagData: body, Source: hs.URI}
|
||||
} else if previousBodySHA != hs.LastBodySHA {
|
||||
hs.Logger.Debug("configuration updated")
|
||||
dataSync <- sync.DataSync{FlagData: body, Source: hs.URI}
|
||||
}
|
||||
})
|
||||
|
||||
hs.Cron.Start()
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: hs.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: hs.URI, Type: sync.ALL}
|
||||
|
||||
<-ctx.Done()
|
||||
hs.Cron.Stop()
|
||||
|
@ -106,18 +115,13 @@ func (hs *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (hs *Sync) fetchBody(ctx context.Context, fetchAll bool) (string, bool, error) {
|
||||
if hs.URI == "" {
|
||||
return "", false, errors.New("no HTTP URL string set")
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", hs.URI, bytes.NewBuffer(nil))
|
||||
func (hs *Sync) fetchBodyFromURL(ctx context.Context, url string) ([]byte, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, bytes.NewBuffer(nil))
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error creating request for url %s: %w", hs.URI, err)
|
||||
return nil, fmt.Errorf("error creating request for url %s: %w", url, err)
|
||||
}
|
||||
|
||||
req.Header.Add("Accept", "application/json")
|
||||
req.Header.Add("Accept", "application/yaml")
|
||||
|
||||
if hs.AuthHeader != "" {
|
||||
req.Header.Set("Authorization", hs.AuthHeader)
|
||||
|
@ -126,60 +130,23 @@ func (hs *Sync) fetchBody(ctx context.Context, fetchAll bool) (string, bool, err
|
|||
req.Header.Set("Authorization", bearer)
|
||||
}
|
||||
|
||||
if hs.eTag != "" && !fetchAll {
|
||||
req.Header.Set("If-None-Match", hs.eTag)
|
||||
}
|
||||
|
||||
resp, err := hs.Client.Do(req)
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error calling endpoint %s: %w", hs.URI, err)
|
||||
return nil, fmt.Errorf("error calling endpoint %s: %w", url, err)
|
||||
}
|
||||
defer func() {
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
hs.Logger.Error(fmt.Sprintf("error closing the response body: %s", err.Error()))
|
||||
hs.Logger.Debug(fmt.Sprintf("error closing the response body: %s", err.Error()))
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode == 304 {
|
||||
hs.Logger.Debug("no changes detected")
|
||||
return "", true, nil
|
||||
}
|
||||
|
||||
statusOK := resp.StatusCode >= 200 && resp.StatusCode < 300
|
||||
if !statusOK {
|
||||
return "", false, fmt.Errorf("error fetching from url %s: %s", hs.URI, resp.Status)
|
||||
}
|
||||
|
||||
if resp.Header.Get("ETag") != "" {
|
||||
hs.eTag = resp.Header.Get("ETag")
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("unable to read body to bytes: %w", err)
|
||||
return nil, fmt.Errorf("unable to read body to bytes: %w", err)
|
||||
}
|
||||
|
||||
json, err := utils.ConvertToJSON(body, getFileExtensions(hs.URI), resp.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return "", false, fmt.Errorf("error converting response body to json: %w", err)
|
||||
}
|
||||
|
||||
if json != "" {
|
||||
hs.LastBodySHA = hs.generateSha([]byte(body))
|
||||
}
|
||||
|
||||
return json, false, nil
|
||||
}
|
||||
|
||||
// getFileExtensions returns the file extension from the URL path
|
||||
func getFileExtensions(url string) string {
|
||||
u, err := parseUrl.Parse(url)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return filepath.Ext(u.Path)
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func (hs *Sync) generateSha(body []byte) string {
|
||||
|
@ -189,6 +156,17 @@ func (hs *Sync) generateSha(body []byte) string {
|
|||
}
|
||||
|
||||
func (hs *Sync) Fetch(ctx context.Context) (string, error) {
|
||||
body, _, err := hs.fetchBody(ctx, false)
|
||||
return body, err
|
||||
if hs.URI == "" {
|
||||
return "", errors.New("no HTTP URL string set")
|
||||
}
|
||||
|
||||
body, err := hs.fetchBodyFromURL(ctx, hs.URI)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(body) != 0 {
|
||||
hs.LastBodySHA = hs.generateSha(body)
|
||||
}
|
||||
|
||||
return string(body), nil
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -18,23 +19,19 @@ import (
|
|||
|
||||
func TestSimpleSync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
resp := "test response"
|
||||
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(_ string, _ func()) error {
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(spec string, cmd func()) error {
|
||||
return nil
|
||||
})
|
||||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
mockClient := syncmock.NewMockClient(ctrl)
|
||||
responseBody := "test response"
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(responseBody)),
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(resp, nil)
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(&http.Response{Body: io.NopCloser(strings.NewReader(resp))}, nil)
|
||||
|
||||
httpSync := Sync{
|
||||
URI: "http://localhost/flags",
|
||||
URI: "http://localhost",
|
||||
Client: mockClient,
|
||||
Cron: mockCron,
|
||||
LastBodySHA: "",
|
||||
|
@ -54,51 +51,8 @@ func TestSimpleSync(t *testing.T) {
|
|||
|
||||
data := <-dataSyncChan
|
||||
|
||||
if data.FlagData != responseBody {
|
||||
t.Errorf("expected content: %s, but received content: %s", responseBody, data.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionWithQSSync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockCron := synctesting.NewMockCron(ctrl)
|
||||
mockCron.EXPECT().AddFunc(gomock.Any(), gomock.Any()).DoAndReturn(func(_ string, _ func()) error {
|
||||
return nil
|
||||
})
|
||||
mockCron.EXPECT().Start().Times(1)
|
||||
|
||||
mockClient := syncmock.NewMockClient(ctrl)
|
||||
responseBody := "test response"
|
||||
resp := &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(responseBody)),
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
mockClient.EXPECT().Do(gomock.Any()).Return(resp, nil)
|
||||
|
||||
httpSync := Sync{
|
||||
URI: "http://localhost/flags.json?env=dev",
|
||||
Client: mockClient,
|
||||
Cron: mockCron,
|
||||
LastBodySHA: "",
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
dataSyncChan := make(chan sync.DataSync)
|
||||
|
||||
go func() {
|
||||
err := httpSync.Sync(ctx, dataSyncChan)
|
||||
if err != nil {
|
||||
log.Fatalf("Error start sync: %s", err.Error())
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
data := <-dataSyncChan
|
||||
|
||||
if data.FlagData != responseBody {
|
||||
t.Errorf("expected content: %s, but received content: %s", responseBody, data.FlagData)
|
||||
if data.FlagData != resp {
|
||||
t.Errorf("expected content: %s, but received content: %s", resp, data.FlagData)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -110,16 +64,13 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
uri string
|
||||
bearerToken string
|
||||
authHeader string
|
||||
eTagHeader string
|
||||
lastBodySHA string
|
||||
handleResponse func(*testing.T, Sync, string, error)
|
||||
}{
|
||||
"success": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -134,19 +85,17 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
},
|
||||
},
|
||||
"return an error if no uri": {
|
||||
setup: func(_ *testing.T, _ *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err == nil {
|
||||
t.Error("expected err, got nil")
|
||||
}
|
||||
},
|
||||
},
|
||||
"update last body sha": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -172,11 +121,7 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
if actualAuthHeader != "Bearer "+expectedToken {
|
||||
t.Fatalf("expected Authorization header to be 'Bearer %s', got %s", expectedToken, actualAuthHeader)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
return &http.Response{Body: io.NopCloser(strings.NewReader("test response"))}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -203,11 +148,7 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
if actualAuthHeader != expectedHeader {
|
||||
t.Fatalf("expected Authorization header to be '%s', got %s", expectedHeader, actualAuthHeader)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
return &http.Response{Body: io.NopCloser(strings.NewReader("test response"))}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
|
@ -226,100 +167,6 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
}
|
||||
},
|
||||
},
|
||||
"unauthorized request": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: http.StatusUnauthorized,
|
||||
}, nil)
|
||||
},
|
||||
uri: "http://localhost",
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
if err == nil {
|
||||
t.Fatalf("expected unauthorized request to return an error")
|
||||
}
|
||||
},
|
||||
},
|
||||
"not modified response etag matched": {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
expectedIfNoneMatch := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
client.EXPECT().Do(gomock.Any()).DoAndReturn(func(req *http.Request) (*http.Response, error) {
|
||||
actualIfNoneMatch := req.Header.Get("If-None-Match")
|
||||
if actualIfNoneMatch != expectedIfNoneMatch {
|
||||
t.Fatalf("expected If-None-Match header to be '%s', got %s", expectedIfNoneMatch, actualIfNoneMatch)
|
||||
}
|
||||
return &http.Response{
|
||||
Header: map[string][]string{"ETag": {expectedIfNoneMatch}},
|
||||
Body: io.NopCloser(strings.NewReader("")),
|
||||
StatusCode: http.StatusNotModified,
|
||||
}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
eTagHeader: `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`,
|
||||
handleResponse: func(t *testing.T, httpSync Sync, _ string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
}
|
||||
|
||||
expectedLastBodySHA := ""
|
||||
expectedETag := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
if httpSync.LastBodySHA != expectedLastBodySHA {
|
||||
t.Errorf(
|
||||
"expected last body sha to be: '%s', got: '%s'", expectedLastBodySHA, httpSync.LastBodySHA,
|
||||
)
|
||||
}
|
||||
if httpSync.eTag != expectedETag {
|
||||
t.Errorf(
|
||||
"expected last etag to be: '%s', got: '%s'", expectedETag, httpSync.eTag,
|
||||
)
|
||||
}
|
||||
},
|
||||
},
|
||||
"modified response etag mismatched": {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
expectedIfNoneMatch := `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`
|
||||
client.EXPECT().Do(gomock.Any()).DoAndReturn(func(req *http.Request) (*http.Response, error) {
|
||||
actualIfNoneMatch := req.Header.Get("If-None-Match")
|
||||
if actualIfNoneMatch != expectedIfNoneMatch {
|
||||
t.Fatalf("expected If-None-Match header to be '%s', got %s", expectedIfNoneMatch, actualIfNoneMatch)
|
||||
}
|
||||
|
||||
newContent := "\"Hey there!\""
|
||||
newETag := `"c2e01ce63d90109c4c7f4f6dcea97ed1bb2b51e3647f36caf5acbe27413a24bb"`
|
||||
|
||||
return &http.Response{
|
||||
Header: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Etag": {newETag},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader(newContent)),
|
||||
StatusCode: http.StatusOK,
|
||||
}, nil
|
||||
})
|
||||
},
|
||||
uri: "http://localhost",
|
||||
eTagHeader: `"1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"`,
|
||||
handleResponse: func(t *testing.T, httpSync Sync, _ string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
}
|
||||
|
||||
expectedLastBodySHA := "wuAc5j2QEJxMf09tzql-0bsrUeNkfzbK9ay-J0E6JLs="
|
||||
expectedETag := `"c2e01ce63d90109c4c7f4f6dcea97ed1bb2b51e3647f36caf5acbe27413a24bb"`
|
||||
if httpSync.LastBodySHA != expectedLastBodySHA {
|
||||
t.Errorf(
|
||||
"expected last body sha to be: '%s', got: '%s'", expectedLastBodySHA, httpSync.LastBodySHA,
|
||||
)
|
||||
}
|
||||
if httpSync.eTag != expectedETag {
|
||||
t.Errorf(
|
||||
"expected last etag to be: '%s', got: '%s'", expectedETag, httpSync.eTag,
|
||||
)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
|
@ -335,7 +182,6 @@ func TestHTTPSync_Fetch(t *testing.T) {
|
|||
AuthHeader: tt.authHeader,
|
||||
LastBodySHA: tt.lastBodySHA,
|
||||
Logger: logger.NewLogger(nil, false),
|
||||
eTag: tt.eTagHeader,
|
||||
}
|
||||
|
||||
fetched, err := httpSync.Fetch(context.Background())
|
||||
|
@ -369,8 +215,6 @@ func TestSync_Init(t *testing.T) {
|
|||
|
||||
func TestHTTPSync_Resync(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
source := "http://localhost"
|
||||
emptyFlagData := "{}"
|
||||
|
||||
tests := map[string]struct {
|
||||
setup func(t *testing.T, client *syncmock.MockClient)
|
||||
|
@ -382,14 +226,12 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
wantNotifications []sync.DataSync
|
||||
}{
|
||||
"success": {
|
||||
setup: func(_ *testing.T, client *syncmock.MockClient) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {
|
||||
client.EXPECT().Do(gomock.Any()).Return(&http.Response{
|
||||
Header: map[string][]string{"Content-Type": {"application/json"}},
|
||||
Body: io.NopCloser(strings.NewReader(emptyFlagData)),
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
}, nil)
|
||||
},
|
||||
uri: source,
|
||||
uri: "http://localhost",
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err != nil {
|
||||
t.Fatalf("fetch: %v", err)
|
||||
|
@ -402,14 +244,15 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
wantErr: false,
|
||||
wantNotifications: []sync.DataSync{
|
||||
{
|
||||
FlagData: emptyFlagData,
|
||||
Source: source,
|
||||
Type: sync.ALL,
|
||||
FlagData: "",
|
||||
Source: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
"error response": {
|
||||
setup: func(_ *testing.T, _ *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, _ string, err error) {
|
||||
setup: func(t *testing.T, client *syncmock.MockClient) {},
|
||||
handleResponse: func(t *testing.T, _ Sync, fetched string, err error) {
|
||||
if err == nil {
|
||||
t.Error("expected err, got nil")
|
||||
}
|
||||
|
@ -445,8 +288,8 @@ func TestHTTPSync_Resync(t *testing.T) {
|
|||
for _, dataSync := range tt.wantNotifications {
|
||||
select {
|
||||
case x := <-d:
|
||||
if x.FlagData != dataSync.FlagData || x.Source != dataSync.Source {
|
||||
t.Errorf("unexpected datasync received %v vs %v", x, dataSync)
|
||||
if !reflect.DeepEqual(x.String(), dataSync.String()) {
|
||||
t.Error("unexpected datasync received", x, dataSync)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("expected datasync not received", dataSync)
|
||||
|
|
|
@ -2,10 +2,37 @@ package sync
|
|||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
type Type int
|
||||
|
||||
// Type of the sync operation
|
||||
const (
|
||||
// ALL - All flags of sync provider. This is the default if unset due to primitive default
|
||||
ALL Type = iota
|
||||
// ADD - Additional flags from sync provider
|
||||
ADD
|
||||
// UPDATE - Update for flag(s) previously provided
|
||||
UPDATE
|
||||
// DELETE - Delete for flag(s) previously provided
|
||||
DELETE
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
switch t {
|
||||
case ALL:
|
||||
return "ALL"
|
||||
case ADD:
|
||||
return "ADD"
|
||||
case UPDATE:
|
||||
return "UPDATE"
|
||||
case DELETE:
|
||||
return "DELETE"
|
||||
default:
|
||||
return "UNKNOWN"
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
ISync implementations watch for changes in the flag sources (HTTP backend, local file, K8s CRDs ...),fetch the latest
|
||||
value and communicate to the Runtime with DataSync channel
|
||||
|
@ -29,9 +56,9 @@ type ISync interface {
|
|||
// DataSync is the data contract between Runtime and sync implementations
|
||||
type DataSync struct {
|
||||
FlagData string
|
||||
SyncContext *structpb.Struct
|
||||
Source string
|
||||
Selector string
|
||||
Type
|
||||
}
|
||||
|
||||
// SourceConfig is configuration option for flagd. This maps to startup parameter sources
|
||||
|
|
|
@ -57,7 +57,7 @@ func (k *Sync) ReSync(ctx context.Context, dataSync chan<- sync.DataSync) error
|
|||
if err != nil {
|
||||
return fmt.Errorf("unable to fetch flag configuration: %w", err)
|
||||
}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI, Type: sync.ALL}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ func (k *Sync) Sync(ctx context.Context, dataSync chan<- sync.DataSync) error {
|
|||
return err
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: fetch, Source: k.URI, Type: sync.ALL}
|
||||
|
||||
notifies := make(chan INotify)
|
||||
|
||||
|
@ -136,7 +136,7 @@ func (k *Sync) watcher(ctx context.Context, notifies chan INotify, dataSync chan
|
|||
continue
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI, Type: sync.ALL}
|
||||
case DefaultEventTypeModify:
|
||||
k.logger.Debug("Configuration modified")
|
||||
msg, err := k.fetch(ctx)
|
||||
|
@ -145,7 +145,7 @@ func (k *Sync) watcher(ctx context.Context, notifies chan INotify, dataSync chan
|
|||
continue
|
||||
}
|
||||
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI}
|
||||
dataSync <- sync.DataSync{FlagData: msg, Source: k.URI, Type: sync.ALL}
|
||||
case DefaultEventTypeDelete:
|
||||
k.logger.Debug("configuration deleted")
|
||||
case DefaultEventTypeReady:
|
||||
|
|
|
@ -607,7 +607,6 @@ func TestInit(t *testing.T) {
|
|||
func TestSync_ReSync(t *testing.T) {
|
||||
const name = "myFF"
|
||||
const ns = "myNS"
|
||||
const payload = "{\"flags\":null}"
|
||||
s := runtime.NewScheme()
|
||||
ff := &unstructured.Unstructured{}
|
||||
ff.SetUnstructuredContent(getCFG(name, ns))
|
||||
|
@ -669,8 +668,8 @@ func TestSync_ReSync(t *testing.T) {
|
|||
i := tt.countMsg
|
||||
for i > 0 {
|
||||
d := <-dataChannel
|
||||
if d.FlagData != payload {
|
||||
t.Errorf("Expected %v, got %v", payload, d.FlagData)
|
||||
if d.Type != sync.ALL {
|
||||
t.Errorf("Expected %v, got %v", sync.ALL, d)
|
||||
}
|
||||
i--
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
@ -30,6 +30,7 @@ import (
|
|||
|
||||
const (
|
||||
metricsExporterOtel = "otel"
|
||||
exportInterval = 2 * time.Second
|
||||
)
|
||||
|
||||
type CollectorConfig struct {
|
||||
|
@ -144,7 +145,7 @@ func buildTransportCredentials(_ context.Context, cfg CollectorConfig) (credenti
|
|||
|
||||
tlsConfig := &tls.Config{
|
||||
RootCAs: capool,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
MinVersion: tls.VersionTLS13,
|
||||
GetCertificate: func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
certs, err := reloader.GetCertificate()
|
||||
if err != nil {
|
||||
|
@ -195,7 +196,7 @@ func buildMetricReader(ctx context.Context, cfg Config) (metric.Reader, error) {
|
|||
return nil, fmt.Errorf("error creating otel metric exporter: %w", err)
|
||||
}
|
||||
|
||||
return metric.NewPeriodicReader(otelExporter), nil
|
||||
return metric.NewPeriodicReader(otelExporter, metric.WithInterval(exportInterval)), nil
|
||||
}
|
||||
|
||||
// buildOtlpExporter is a helper to build grpc backed otlp trace exporter
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest/observer"
|
||||
)
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
msdk "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -19,15 +19,15 @@ const (
|
|||
FeatureFlagReasonKey = attribute.Key("feature_flag.reason")
|
||||
ExceptionTypeKey = attribute.Key("ExceptionTypeKeyName")
|
||||
|
||||
httpRequestDurationMetric = "http.server.request.duration"
|
||||
httpResponseSizeMetric = "http.server.response.body.size"
|
||||
httpRequestDurationMetric = "http.server.duration"
|
||||
httpResponseSizeMetric = "http.server.response.size"
|
||||
httpActiveRequestsMetric = "http.server.active_requests"
|
||||
impressionMetric = "feature_flag." + ProviderName + ".impression"
|
||||
reasonMetric = "feature_flag." + ProviderName + ".result.reason"
|
||||
reasonMetric = "feature_flag." + ProviderName + ".evaluation.reason"
|
||||
)
|
||||
|
||||
type IMetricsRecorder interface {
|
||||
HTTPAttributes(svcName, url, method, code, scheme string) []attribute.KeyValue
|
||||
HTTPAttributes(svcName, url, method, code string) []attribute.KeyValue
|
||||
HTTPRequestDuration(ctx context.Context, duration time.Duration, attrs []attribute.KeyValue)
|
||||
HTTPResponseSize(ctx context.Context, sizeBytes int64, attrs []attribute.KeyValue)
|
||||
InFlightRequestStart(ctx context.Context, attrs []attribute.KeyValue)
|
||||
|
@ -38,7 +38,7 @@ type IMetricsRecorder interface {
|
|||
|
||||
type NoopMetricsRecorder struct{}
|
||||
|
||||
func (NoopMetricsRecorder) HTTPAttributes(_, _, _, _, _ string) []attribute.KeyValue {
|
||||
func (NoopMetricsRecorder) HTTPAttributes(_, _, _, _ string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{}
|
||||
}
|
||||
|
||||
|
@ -68,13 +68,12 @@ type MetricsRecorder struct {
|
|||
reasons metric.Int64Counter
|
||||
}
|
||||
|
||||
func (r MetricsRecorder) HTTPAttributes(svcName, url, method, code, scheme string) []attribute.KeyValue {
|
||||
func (r MetricsRecorder) HTTPAttributes(svcName, url, method, code string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String(svcName),
|
||||
semconv.HTTPRouteKey.String(url),
|
||||
semconv.HTTPRequestMethodKey.String(method),
|
||||
semconv.HTTPResponseStatusCodeKey.String(code),
|
||||
semconv.URLSchemeKey.String(scheme),
|
||||
semconv.HTTPURLKey.String(url),
|
||||
semconv.HTTPMethodKey.String(method),
|
||||
semconv.HTTPStatusCodeKey.String(code),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.13.0"
|
||||
)
|
||||
|
||||
const svcName = "mySvc"
|
||||
|
@ -38,10 +38,9 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String(""),
|
||||
semconv.HTTPRouteKey.String(""),
|
||||
semconv.HTTPRequestMethodKey.String(""),
|
||||
semconv.HTTPResponseStatusCodeKey.String(""),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String(""),
|
||||
semconv.HTTPMethodKey.String(""),
|
||||
semconv.HTTPStatusCodeKey.String(""),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -54,10 +53,9 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String("myService"),
|
||||
semconv.HTTPRouteKey.String("#123"),
|
||||
semconv.HTTPRequestMethodKey.String("POST"),
|
||||
semconv.HTTPResponseStatusCodeKey.String("300"),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String("#123"),
|
||||
semconv.HTTPMethodKey.String("POST"),
|
||||
semconv.HTTPStatusCodeKey.String("300"),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -70,17 +68,16 @@ func TestHTTPAttributes(t *testing.T) {
|
|||
},
|
||||
want: []attribute.KeyValue{
|
||||
semconv.ServiceNameKey.String("!@#$%^&*()_+|}{[];',./<>"),
|
||||
semconv.HTTPRouteKey.String(""),
|
||||
semconv.HTTPRequestMethodKey.String(""),
|
||||
semconv.HTTPResponseStatusCodeKey.String(""),
|
||||
semconv.URLSchemeKey.String("http"),
|
||||
semconv.HTTPURLKey.String(""),
|
||||
semconv.HTTPMethodKey.String(""),
|
||||
semconv.HTTPStatusCodeKey.String(""),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rec := MetricsRecorder{}
|
||||
res := rec.HTTPAttributes(tt.req.Service, tt.req.ID, tt.req.Method, tt.req.Code, "http")
|
||||
res := rec.HTTPAttributes(tt.req.Service, tt.req.ID, tt.req.Method, tt.req.Code)
|
||||
require.Equal(t, tt.want, res)
|
||||
})
|
||||
}
|
||||
|
@ -211,7 +208,7 @@ func TestMetrics(t *testing.T) {
|
|||
// some really simple tests just to make sure all methods are actually implemented and nothing panics
|
||||
func TestNoopMetricsRecorder_HTTPAttributes(t *testing.T) {
|
||||
no := NoopMetricsRecorder{}
|
||||
got := no.HTTPAttributes("", "", "", "", "")
|
||||
got := no.HTTPAttributes("", "", "", "")
|
||||
require.Empty(t, got)
|
||||
}
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@ package telemetry
|
|||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
// utils contain common utilities to help with telemetry
|
||||
|
@ -14,7 +14,7 @@ const provider = "flagd"
|
|||
func SemConvFeatureFlagAttributes(ffKey string, ffVariant string) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
semconv.FeatureFlagKey(ffKey),
|
||||
semconv.FeatureFlagResultVariant(ffVariant),
|
||||
semconv.FeatureFlagVariant(ffVariant),
|
||||
semconv.FeatureFlagProviderName(provider),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
func TestSemConvFeatureFlagAttributes(t *testing.T) {
|
||||
|
@ -35,7 +35,7 @@ func TestSemConvFeatureFlagAttributes(t *testing.T) {
|
|||
case semconv.FeatureFlagKeyKey:
|
||||
require.Equal(t, test.key, attribute.Value.AsString(),
|
||||
"expected flag key: %s, but received: %s", test.key, attribute.Value.AsString())
|
||||
case semconv.FeatureFlagResultVariantKey:
|
||||
case semconv.FeatureFlagVariantKey:
|
||||
require.Equal(t, test.variant, attribute.Value.AsString(),
|
||||
"expected flag variant: %s, but received %s", test.variant, attribute.Value.AsString())
|
||||
case semconv.FeatureFlagProviderNameKey:
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"mime"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var alphanumericRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
|
||||
|
||||
// ConvertToJSON attempts to convert the content of a file to JSON based on the file extension.
|
||||
// The media type is used as a fallback in case the file extension is not recognized.
|
||||
func ConvertToJSON(data []byte, fileExtension string, mediaType string) (string, error) {
|
||||
var detectedType string
|
||||
if fileExtension != "" {
|
||||
// file extension only contains alphanumeric characters
|
||||
detectedType = alphanumericRegex.ReplaceAllString(fileExtension, "")
|
||||
} else {
|
||||
parsedMediaType, _, err := mime.ParseMediaType(mediaType)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to determine file format: %w", err)
|
||||
}
|
||||
detectedType = parsedMediaType
|
||||
}
|
||||
|
||||
// Normalize the detected type
|
||||
detectedType = strings.ToLower(detectedType)
|
||||
|
||||
switch detectedType {
|
||||
case "yaml", "yml", "application/yaml", "application/x-yaml":
|
||||
str, err := YAMLToJSON(data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error converting blob from yaml to json: %w", err)
|
||||
}
|
||||
return str, nil
|
||||
case "json", "application/json":
|
||||
return string(data), nil
|
||||
default:
|
||||
return "", fmt.Errorf("unsupported file format: '%s'", detectedType)
|
||||
}
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConvertToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
data []byte
|
||||
fileExtension string
|
||||
mediaType string
|
||||
want string
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
"json file type": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "json",
|
||||
mediaType: "application/json",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"json file type using in http path": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: ".json/",
|
||||
mediaType: "",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"json file type with encoding": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "json",
|
||||
mediaType: "application/json; charset=utf-8",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yaml file type": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yaml file type with encoding": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml; charset=utf-8",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"yml file type": {
|
||||
data: []byte("flags:\n foo: bar"),
|
||||
fileExtension: "yml",
|
||||
mediaType: "application/x-yaml",
|
||||
want: `{"flags":{"foo":"bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"invalid yaml": {
|
||||
data: []byte("invalid: [yaml: content"),
|
||||
fileExtension: "yaml",
|
||||
mediaType: "application/yaml",
|
||||
wantErr: true,
|
||||
errContains: "error converting blob from yaml to json",
|
||||
},
|
||||
"unsupported file type": {
|
||||
data: []byte("some content"),
|
||||
fileExtension: "txt",
|
||||
mediaType: "text/plain",
|
||||
wantErr: true,
|
||||
errContains: "unsupported file format",
|
||||
},
|
||||
"empty file type with valid media type": {
|
||||
data: []byte(`{"flags": {"foo": "bar"}}`),
|
||||
fileExtension: "",
|
||||
mediaType: "application/json",
|
||||
want: `{"flags": {"foo": "bar"}}`,
|
||||
wantErr: false,
|
||||
},
|
||||
"invalid media type": {
|
||||
data: []byte("some content"),
|
||||
fileExtension: "",
|
||||
mediaType: "invalid/\\type",
|
||||
wantErr: true,
|
||||
errContains: "unable to determine file format",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, err := ConvertToJSON(tt.data, tt.fileExtension, tt.mediaType)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ConvertToJSON() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr {
|
||||
if err == nil || !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("ConvertToJSON() expected error containing %q, got %v", tt.errContains, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("ConvertToJSON() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// converts YAML byte array to JSON string
|
||||
func YAMLToJSON(rawFile []byte) (string, error) {
|
||||
if len(rawFile) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var ms map[string]interface{}
|
||||
if err := yaml.Unmarshal(rawFile, &ms); err != nil {
|
||||
return "", fmt.Errorf("error unmarshaling yaml: %w", err)
|
||||
}
|
||||
|
||||
r, err := json.Marshal(ms)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling json: %w", err)
|
||||
}
|
||||
|
||||
return string(r), err
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
package utils
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestYAMLToJSON(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input []byte
|
||||
expected string
|
||||
expectedError bool
|
||||
}{
|
||||
"empty": {
|
||||
input: []byte(""),
|
||||
expected: "",
|
||||
expectedError: false,
|
||||
},
|
||||
"simple yaml": {
|
||||
input: []byte("key: value"),
|
||||
expected: `{"key":"value"}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"nested yaml": {
|
||||
input: []byte("parent:\n child: value"),
|
||||
expected: `{"parent":{"child":"value"}}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"invalid yaml": {
|
||||
input: []byte("invalid: yaml: : :"),
|
||||
expectedError: true,
|
||||
},
|
||||
"array yaml": {
|
||||
input: []byte("items:\n - item1\n - item2"),
|
||||
expected: `{"items":["item1","item2"]}`,
|
||||
expectedError: false,
|
||||
},
|
||||
"complex yaml": {
|
||||
input: []byte("bool: true\nnum: 123\nstr: hello\nobj:\n nested: value\narr:\n - 1\n - 2"),
|
||||
expected: `{"arr":[1,2],"bool":true,"num":123,"obj":{"nested":"value"},"str":"hello"}`,
|
||||
expectedError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tt := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
output, err := YAMLToJSON(tt.input)
|
||||
|
||||
if tt.expectedError && err == nil {
|
||||
t.Error("expected error but got none")
|
||||
}
|
||||
if !tt.expectedError && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if output != tt.expected {
|
||||
t.Errorf("expected output '%v', got '%v'", tt.expected, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
---
|
||||
# Valid statuses: draft | proposed | rejected | accepted | superseded
|
||||
status: draft
|
||||
author: Your Name
|
||||
created: YYYY-MM-DD
|
||||
updated: YYYY-MM-DD
|
||||
---
|
||||
|
||||
# Title
|
||||
|
||||
<!--
|
||||
This section should be one or two paragraphs that just explains what the goal of this decision is going to be, but without diving too deeply into the "why", "why now", "how", etc.
|
||||
Ensure anyone opening the document will form a clear understanding of the intent from reading this paragraph(s).
|
||||
-->
|
||||
|
||||
## Background
|
||||
|
||||
<!--
|
||||
The next section is the "Background" section. This section should be at least two paragraphs and can take up to a whole page in some cases.
|
||||
The guiding goal of the background section is: as a newcomer to this project (new employee, team transfer), can I read the background section and follow any links to get the full context of why this change is necessary?
|
||||
|
||||
If you can't show a random engineer the background section and have them acquire nearly full context on the necessity for the RFC, then the background section is not full enough. To help achieve this, link to prior RFCs, discussions, and more here as necessary to provide context so you don't have to simply repeat yourself.
|
||||
-->
|
||||
|
||||
## Requirements
|
||||
|
||||
<!--
|
||||
This section outlines the requirements that the proposal must meet.
|
||||
These requirements should be derived from the background section and should be clear, concise, and actionable.
|
||||
This is where you can specify the goals and constraints that the proposal must satisfy.
|
||||
This could include performance metrics, security considerations, user experience goals, and any other relevant criteria.
|
||||
-->
|
||||
* {Requirement 1}
|
||||
* {Requirement 2}
|
||||
* {Requirement 3}
|
||||
* … <!-- numbers of requirements can vary -->
|
||||
|
||||
## Considered Options
|
||||
|
||||
<!--
|
||||
This section lists all the options that were considered for addressing the need outlined in the background section.
|
||||
Each option should be clearly defined with a descriptive title.
|
||||
This provides a comprehensive overview of the solution space that was explored before making a decision.
|
||||
The options will be evaluated in the proposal section, where the chosen approach is justified.
|
||||
-->
|
||||
|
||||
* {title of option 1}
|
||||
* {title of option 2}
|
||||
* {title of option 3}
|
||||
* … <!-- numbers of options can vary -->
|
||||
|
||||
## Proposal
|
||||
|
||||
<!--
|
||||
The next required section is "Proposal" or "Goal".
|
||||
Given the background above, this section proposes a solution.
|
||||
This should be an overview of the "how" for the solution.
|
||||
Include content like diagrams, prototypes, and high-level requirements.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### API changes
|
||||
|
||||
<!--
|
||||
This section should describe any API changes that are part of the proposal.
|
||||
This includes any new endpoints, changes to existing endpoints, or modifications to the data model.
|
||||
It should provide enough detail for developers to understand how the API will evolve and what impact it will have on existing clients.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### Consequences
|
||||
|
||||
* Good, because {positive consequence, e.g., improvement of one or more desired qualities, …}
|
||||
* Bad, because {negative consequence, e.g., compromising one or more desired qualities, …}
|
||||
* … <!-- numbers of consequences can vary -->
|
||||
|
||||
### Timeline
|
||||
|
||||
<!--
|
||||
This section outlines a high level timeline for implementing the proposal.
|
||||
It should include key milestones, deadlines, and any dependencies that need to be addressed.
|
||||
This helps to set expectations for the size of the change and the expected timeline for completion.
|
||||
-->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
### Open questions
|
||||
|
||||
* {Question 1}
|
||||
* … <!-- numbers of question can vary -->
|
||||
|
||||
<!-- This is an optional element. Feel free to remove. -->
|
||||
## More Information
|
||||
|
||||
<!--
|
||||
This section provides additional context, evidence, or documentation to support the decision.
|
||||
Use this space to provide any supplementary information that would be helpful for future readers
|
||||
to fully understand the decision and its implications.
|
||||
-->
|
|
@ -1,81 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @toddbaert
|
||||
created: 2025-05-16
|
||||
updated: --
|
||||
---
|
||||
|
||||
# Adoption of Cucumber/Gherkin for `flagd` Testing Suite
|
||||
|
||||
This decision document outlines the rationale behind adopting the Cucumber/Gherkin testing framework for the `flagd` project’s testing suite. The goal is to establish a clear, maintainable, and language-agnostic approach for writing integration and behavior-driven tests.
|
||||
|
||||
By leveraging Gherkin’s natural language syntax and Cucumber’s mature ecosystem, we aim to improve test clarity and accessibility across teams, enabling both developers and non-developers to contribute to test case development and validation.
|
||||
|
||||
## Background
|
||||
|
||||
`flagd` is an open-source feature flagging engine that forms a core part of the OpenFeature ecosystem. As such, it includes many clients (providers) written in multiple languages and it needs robust, readable, and accessible testing frameworks that allow for scalable behavior-driven testing.
|
||||
|
||||
Previously, test cases for `flagd` providers were written in language-specific test frameworks, which created fragmentation and limited contributions from engineers who weren’t familiar with the language in question. Furthermore, the ability to validate consistent feature flag behavior across multiple SDKs and environments became increasingly important as adoption grew, and in-process evaluation was implemented.
|
||||
|
||||
To address this, the engineering team investigated frameworks that would enable:
|
||||
|
||||
- Behavior-driven development (BDD) to validate consistent flag evaluation behavior, configuration, and provider life-cycle (connection, etc).
|
||||
- High cross-language support to integrate with multiple SDKs and tools.
|
||||
- Ease of use for writing, understanding, enhancing and maintaining tests.
|
||||
|
||||
After evaluating our options and experimenting with prototypes, we adopted Cucumber with Gherkin syntax for our testing strategy.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Must be supported across a wide variety of programming languages.
|
||||
- Must offer mature tooling and documentation.
|
||||
- Must enable the writing of easily understandable, high-level test cases.
|
||||
- Must be open source.
|
||||
- Should support automated integration in CI pipelines.
|
||||
- Should support parameterized and reusable test definitions.
|
||||
|
||||
## Considered Options
|
||||
|
||||
- Adoption of Cucumber/Gherkin e2e testing framework
|
||||
- No cross-implementation e2e testing framework (rely on unit tests)
|
||||
- Custom e2e testing framework, perhaps based on csv or other tabular input/output assertions
|
||||
|
||||
## Proposal
|
||||
|
||||
We adopted the Cucumber testing framework, using Gherkin syntax to define feature specifications and test behaviors. Gherkin offers a structured and readable DSL (domain-specific language) that enables concise expression of feature behaviors in plain English, making test scenarios accessible to both technical and non-technical contributors.
|
||||
|
||||
We use Cucumber’s tooling in combination with language bindings (e.g., Go, JavaScript, Python) to execute these scenarios across different environments and SDKs. Step definitions are implemented using the idiomatic tools of each language, while test scenarios remain shared and version-controlled.
|
||||
|
||||
### API changes
|
||||
|
||||
N/A – this decision does not introduce API-level changes but applies to test infrastructure and development workflow.
|
||||
|
||||
### Consequences
|
||||
|
||||
#### Pros
|
||||
|
||||
- Test scenarios are readable and accessible to a broad range of contributors.
|
||||
- Cucumber and Gherkin are supported in most major programming languages.
|
||||
- Tests are partially decoupled from the underlying implementation language.
|
||||
- Parameterized and reuseable test definitions mean new validations and assertions can often be added in providers without writing any code.
|
||||
|
||||
#### Cons
|
||||
|
||||
- Adding a new framework introduces some complexity and a learning curve.
|
||||
- In some cases/runtimes, debugging failed tests in Gherkin can be more difficult than traditional unit tests.
|
||||
|
||||
### Timeline
|
||||
|
||||
N/A - this is a retrospective document, timeline was not recorded.
|
||||
|
||||
### Open questions
|
||||
|
||||
- Should we enforce Gherkin for all providers?
|
||||
|
||||
## More Information
|
||||
|
||||
- [flagd Testbed Repository](https://github.com/open-feature/flagd-testbed)
|
||||
- [Cucumber Documentation](https://cucumber.io/docs/)
|
||||
- [Gherkin Syntax Guide](https://cucumber.io/docs/gherkin/)
|
||||
- [flagd GitHub Repository](https://github.com/open-feature/flagd)
|
||||
- [OpenFeature Project Overview](https://openfeature.dev/)
|
|
@ -1,77 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @tangenti
|
||||
created: 2025-06-16
|
||||
updated: 2025-06-16
|
||||
---
|
||||
|
||||
# Decouple flag sync sources and flag sets
|
||||
|
||||
The goal is to support dynamic flag sets for flagd providers and decouple sources and flag sets.
|
||||
|
||||
## Background
|
||||
|
||||
Flagd daemon syncs flag configurations from multiple sources. A single source provides a single config, which has an optional flag set ID that may or may not change in the following syncs of the same source.
|
||||
|
||||
The in-process provider uses `selector` to specify the desired source. In order to get a desired flag set, a provider has to stick to a source that provides that flag set. In this case, the flagd daemon cannot remove a source without breaking the dependant flagd providers.
|
||||
|
||||
Assumptions of the current model
|
||||
|
||||
- `flagSetId`s must be unique across different sources or the configuration is considered invalid.
|
||||
- In-process providers request at most one flag set.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Flagd daemon can remove a source without breaking in-process providers that depend on the flag set the source provides.
|
||||
- In-process providers can select based on flag sets.
|
||||
- No breaking changes for the current usage of `selector`
|
||||
|
||||
## Proposal
|
||||
|
||||
### API change
|
||||
|
||||
#### Flag Configuration Schema
|
||||
|
||||
Add an optional field `flagsetID` under `flag` or `flag.metadata`. The flag set ID cannot be specified if a flag set ID is specified for the config.
|
||||
|
||||
### Flagd Sync Selector
|
||||
|
||||
Selector will be extended for generic flags selection, starting with checking the equivalence of `source` and `flagsetID` of flags.
|
||||
|
||||
Example
|
||||
|
||||
```yaml
|
||||
# Flags from the source `override`
|
||||
selector: override
|
||||
|
||||
# Flags from the source `override`
|
||||
selector: source=override
|
||||
|
||||
# Flags from the flag set `project-42`
|
||||
selector: flagsetID=project-42
|
||||
```
|
||||
|
||||
The semantic can later be extended with a more complex design, such as AIP-160 filter or Kubernetes selections. This is out of the scope of this ADR.
|
||||
|
||||
### Flagd Daemon Storage
|
||||
|
||||
1. Flagd will have separate stores for `flags` and `sources`
|
||||
|
||||
2. `selector` will be removed from the store
|
||||
|
||||
3. `flagSetID` will be added as part of `model.Flag` or under `model.Flag.Metadata` for better consistency with the API.
|
||||
|
||||
### Flags Sync
|
||||
|
||||
Sync server would count the extended syntax of `selector` and filter the list of flags on-the-fly answering the requests from the providers.
|
||||
|
||||
The existing conflict resolving based on sources remains the same. Resyncs on removing flags remains unchanged as well.
|
||||
|
||||
## Consequences
|
||||
|
||||
### The good
|
||||
|
||||
- One source can have multiple flag sets.
|
||||
- `selector` works on a more grandular level.
|
||||
- No breaking change
|
||||
- Sync servers and clients now hold the same understanding of the `selector` semantic.
|
|
@ -1,144 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @tangenti
|
||||
created: 2025-06-27
|
||||
updated: 2025-06-27
|
||||
---
|
||||
|
||||
# Support for Duplicate Flag Keys
|
||||
|
||||
This ADR proposes allowing a single sync source to provide multiple flags that share the same key. This enables greater flexibility for modularizing flag configurations.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, the `flagd` [flag configuration](https://flagd.dev/schema/v0/flags.json) stores flags in a JSON object (a map), where each key must be unique. While the JSON specification technically allows duplicate keys, it's not recommended and not well-supported in the implementations.
|
||||
|
||||
This limitation prevents use cases for flag modularization and multi-tenancy, such as:
|
||||
|
||||
- **Component-based Flags:** Two different services, each with its own in-process provider, cannot independently define a flag with the same key when communicating with the same `flagd` daemon.
|
||||
- **Multi-Tenant Targeting:** A single flagd daemon cannot use the same flag key with different targeting rules for different tenants
|
||||
|
||||
## Requirements
|
||||
|
||||
- Allow a single sync source to define multiple flags that have the same key.
|
||||
- Flags from a sync source with the same keys can have different types and targeting rules.
|
||||
- No breaking changes for the current flagd flag configuration schema or flagd sync services.
|
||||
|
||||
## Proposal
|
||||
|
||||
We will update the `flagd` flag configuration schema to support receiving flags as an **array of flag objects**. The existing schema will remain fully supported.
|
||||
|
||||
### API Change
|
||||
|
||||
#### Flag Configuration Schema
|
||||
|
||||
We'll add a new schema as a [subschema](https://json-schema.org/learn/glossary#subschema) to the existing flagd flag configuration schema. It will be a composite of the original schema except `flags` (`#/definitions/base`), with a new schema for `flags` that allows flags array in addition to the currently supported flags object. The existing main schema will be the composite of `#/definitions/base` and the subschema for the flags object.
|
||||
|
||||
```json
|
||||
...
|
||||
"flagsArray": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/flag"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
"description": "Key of the flag",
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"key"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"flagsArraySchema": {
|
||||
"$id": "https://flagd.dev/schema/v0/flags.json#flagsarray",
|
||||
"type": "object",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/base"
|
||||
},
|
||||
{
|
||||
"properties": {
|
||||
"flags": {
|
||||
"oneOf": [
|
||||
{
|
||||
"$ref": "#/definitions/flagsArray"
|
||||
},
|
||||
{
|
||||
"$ref": "#/definitions/flagsMap"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"flags"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
If the config level flag set ID is not specified, `metadata.flagSetId` of each flag will be interpreted as its flag set ID.
|
||||
|
||||
A flag will be uniquely identified by the composite key `(flagKey, flagSetId)`. The following three flags will be considered as three different flags.
|
||||
|
||||
1. `{"flagKey": "enable-feature", "flagSetId": ""}`
|
||||
2. `{"flagKey": "enable-feature", "flagSetId": "default"}`
|
||||
3. `{"flagKey": "enable-feature", "flagSetId": "beta"}`
|
||||
|
||||
### Flagd daemon
|
||||
|
||||
Flagd daemon will perform the JSON schema checks with the reference to `https://flagd.dev/schema/v0/flags.json#flagsarray`, allowing both flags as an object and as an array.
|
||||
|
||||
If the flag array contains two or more flags with the same composite key, the config will be considered as invalid.
|
||||
|
||||
If the request from in-process flagd providers results in a config that has duplicate flag keys, the flagd daemon will only keep one of them in the response.
|
||||
|
||||
### Flagd Daemon Storage
|
||||
|
||||
1. Flagd will have separate stores for `flags` and `sources`.
|
||||
|
||||
1. The `flags` store will use the composite key for flags.
|
||||
|
||||
1. `selector` will be removed from the store
|
||||
|
||||
1. `flagSetId` will be moved from `source` metadata to `flag` metadata.
|
||||
|
||||
### Flags Lifecycle
|
||||
|
||||
Currently, the flags configurations from the latest update of a source will trigger a `sync.ALL` sync. If a flag was presented in the previous configuration but not in the current configuration, it will be removed. In other words, the latest source that provides the config for a flag takes ownership of that flag, and any subsequent configs are considered as the full states of the flags that are owned by the source.
|
||||
|
||||
We'll keep the same behaviors with this proposal:
|
||||
|
||||
1. If two sources provide the flags with the same composite key, the latest one will be stored.
|
||||
|
||||
2. If a flag from a source is no longer present in the latest configuration of the same source, it will be removed.
|
||||
|
||||
This behavior is less ideal as the ownership management depends on the order of the sync. This should be addressed in a separate ADR.
|
||||
|
||||
### Consequences
|
||||
|
||||
#### The good
|
||||
|
||||
- One source can provide flags with the same keys.
|
||||
- Flag set ID no longer bound to a source, so one source can have multiple flag sets.
|
||||
- No breaking change of the API definition and the API behaviors.
|
||||
- No significant change on the flagd stores and how selections work.
|
||||
|
||||
#### The bad
|
||||
|
||||
- The proposal still leverages the concept of flag set in the flagd storage.
|
||||
|
||||
- The schema does not guarantee that flags of the same flag set from the same source will not have the same keys. This is guaranteed in the proposal of #1634.
|
||||
|
||||
- Compared to #1634, this proposal does not allow to define flag set wide metadata.
|
|
@ -1,155 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: Todd Baert
|
||||
created: 2025-06-05
|
||||
updated: 2025-06-05
|
||||
---
|
||||
|
||||
# Flag and Targeting Configuration
|
||||
|
||||
## Background
|
||||
|
||||
Feature flag systems require a flexible, safe, and portable way to express targeting rules that can evaluate contextual data to determine which variant of a feature to serve.
|
||||
|
||||
flagd's targeting system was designed with several key requirements:
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Language agnostic**: Rules must be portable across different programming languages, ideally relying on existing expression language(s)
|
||||
- **Safe evaluation**: No arbitrary code execution or system access
|
||||
- **Deterministic**: Same inputs must always produce same outputs
|
||||
- **Extensible**: Support for the addition of domain-specific operations relevant to feature flags
|
||||
- **Developer and machine friendly**: Human-readable, easily validated, and easily serialized
|
||||
|
||||
## Proposal
|
||||
|
||||
### JSON Logic as the Foundation
|
||||
|
||||
flagd chose **JSON Logic** as its core evaluation engine, implementing a modified version with custom extensions.
|
||||
This provides a secure, portable foundation where rules are expressed as JSON objects with operators as keys and parameters as values.
|
||||
|
||||
#### Benefits realized
|
||||
|
||||
- Rules can be stored in databases, transmitted over networks, shared between frontend/backend, and embedded in Kubernetes custom resources
|
||||
- No eval() or code injection risks - computations are deterministic and sand-boxed
|
||||
- Implementations exist in most languages
|
||||
|
||||
#### Overview
|
||||
|
||||
The system provides two tiers of operators:
|
||||
|
||||
##### Primitive JSON Logic Operators (inherited from the JSONLogic)
|
||||
|
||||
- Logical: `and`, `or`, `!`, `!!`
|
||||
- Comparison: `==`, `!=`, `>`, `<`, etc
|
||||
- Arithmetic: `+`, `-`, `*`, `/`, `%`
|
||||
- Array operations: `in`, `map`, `filter`, etc
|
||||
- String operations: `cat`, `substr`, etc
|
||||
- Control flow: `if`
|
||||
- Assignment and extraction: `var`
|
||||
|
||||
##### Custom flagd Extensions
|
||||
|
||||
- `fractional`: Deterministic percentage-based distribution using murmur3 hashing
|
||||
- `starts_with`/`ends_with`: String prefix/suffix matching for common patterns
|
||||
- `sem_ver`: Semantic version comparisons with standard (npm-style) operators
|
||||
- `$ref`: Reference to shared evaluators for DRY principle
|
||||
|
||||
##### Evaluation Context and Automatic Enrichment
|
||||
|
||||
flagd automatically injects critical context values:
|
||||
|
||||
##### System-provided context
|
||||
|
||||
- `$flagd.flagKey`: The flag being evaluated (available v0.6.4+)
|
||||
- `$flagd.timestamp`: Unix timestamp of evaluation (available v0.6.7+)
|
||||
|
||||
This enables sophisticated targeting rules that can reference the flag itself or time-based conditions without requiring client-side context.
|
||||
|
||||
##### Reason Code System for Transparency
|
||||
|
||||
flagd returns specific reason codes with every evaluation to indicate how the decision was made:
|
||||
|
||||
1. **STATIC**: Flag has no targeting rules, and can be safely cached
|
||||
2. **TARGETING_MATCH**: Targeting rules matched and returned a variant
|
||||
3. **DEFAULT**: Targeting rules evaluated to null, fell back to default
|
||||
4. **CACHED**: Value retrieved from provider cache (RPC mode only)
|
||||
5. **ERROR**: Evaluation failed due to invalid configuration
|
||||
|
||||
This transparency enables:
|
||||
|
||||
- Appropriate caching strategies (only STATIC flags are cached)
|
||||
- Improved debugging, telemetry, and monitoring of flag behavior
|
||||
|
||||
##### Shared Evaluators for Reusability
|
||||
|
||||
The `$evaluators` top-level property enables shared targeting logic:
|
||||
|
||||
```json
|
||||
{
|
||||
"$evaluators": {
|
||||
"isEmployee": {
|
||||
"ends_with": [{"var": "email"}, "@company.com"]
|
||||
}
|
||||
},
|
||||
"flags": {
|
||||
"feature-x": {
|
||||
"state": "ENABLED",
|
||||
"defaultVariant": "enabled",
|
||||
"variants": {
|
||||
"enabled": true,
|
||||
"disabled": false
|
||||
},
|
||||
"targeting": {
|
||||
"if": [{"$ref": "isEmployee"}, "enabled", "disabled"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### Intelligent Caching Strategy
|
||||
|
||||
Only flags with reason **STATIC** are cached, as they have deterministic outputs. This ensures:
|
||||
|
||||
- Maximum cache efficiency for simple toggles
|
||||
- Fresh evaluation for complex targeting rules
|
||||
- Cache invalidation on configuration changes
|
||||
|
||||
##### Schema-Driven Configuration
|
||||
|
||||
Two schemas validate flag configurations:
|
||||
|
||||
- `https://flagd.dev/schema/v0/flags.json`: Overall flag structure
|
||||
- `https://flagd.dev/schema/v0/targeting.json`: Targeting rule validation
|
||||
|
||||
These enable:
|
||||
|
||||
- IDE support with autocomplete
|
||||
- Run-time and build-time validation
|
||||
- Separate validation of rules and overall configuration if desired
|
||||
|
||||
## Considered Options
|
||||
|
||||
- **Custom DSL**: Would require parsers in every language
|
||||
- **JavaScript/Lua evaluation**: Security risks and language lock-in
|
||||
- **CEL**: limited number of implementations at time of decision, can't be directly parsed/validated when embedded in Kubernetes resources
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Good, because implementations exist across languages
|
||||
- Good, because no code injection or system access is possible
|
||||
- Good, because combined with JSON schemas, we have rich IDE support
|
||||
- Good, because JSON is easily serialized and also can be represented/embedded in YAML
|
||||
|
||||
### Negative
|
||||
|
||||
- Bad, JSONLogic syntax can be cumbersome when rules are complex
|
||||
- Bad, hard to debug
|
||||
|
||||
## Conclusion
|
||||
|
||||
flagd's targeting configuration system represents a thoughtful balance between safety, portability, and capability.
|
||||
By building on JSON Logic and extending it with feature-flag-specific operators, flagd achieves remarkable flexibility while maintaining security and performance.
|
|
@ -1,121 +0,0 @@
|
|||
---
|
||||
status: draft
|
||||
author: @toddbaert
|
||||
created: 2025-06-06
|
||||
updated: 2025-06-06
|
||||
---
|
||||
|
||||
# Fractional Operator
|
||||
|
||||
The fractional operator enables deterministic, fractional feature flag distribution.
|
||||
|
||||
## Background
|
||||
|
||||
Nearly all feature flag systems require pseudorandom assignment support to facilitate key use cases, including experimentation and fractional progressive rollouts.
|
||||
Since flagd seeks to implement a full feature flag evaluation engine, such a feature is required.
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Deterministic**: must be consistent given the same input (so users aren't re-assigned with each page view, for example)
|
||||
- **Performant**: must be quick; we want "predictable randomness", but with a relatively low performance cost
|
||||
- **Ease of use**: must be easy to use and understand for basic use-cases
|
||||
- **Customization**: must support customization, such as specifying a particular context attribute to "bucket" on
|
||||
- **Stability**: adding new variants should result in new assignments for as small a section of the audience as possible
|
||||
- **Strong avalanche effect**: slight input changes should result in relatively high chance of differential bucket assignment
|
||||
|
||||
## Considered Options
|
||||
|
||||
- We considered various "more common" hash algos, such as `sha1` and `md5`, but they were frequently slower than `Murmur3`, and didn't offer better performance for our purposes
|
||||
- Initially we required weights to sum to 100, but we've since revoked that requirement
|
||||
|
||||
## Proposal
|
||||
|
||||
### MurmurHash3 + numeric weights + optional targeting-key-based bucketing value
|
||||
|
||||
#### The fractional operator mechanism
|
||||
|
||||
The fractional operator facilitates **deterministic A/B testing and gradual rollouts** through a custom JSONLogic extension introduced in flagd version 0.6.4+.
|
||||
This operator splits feature flag variants into "buckets", based on the `targetingKey` (or another optionally specified key), ensuring users consistently receive the same variant across sessions through sticky evaluation.
|
||||
|
||||
The core algorithm involves four steps: extracting a bucketing property from the evaluation context, hashing this value using MurmurHash3, mapping the hash to a [0, 99] range, and selecting variants based on cumulative weight thresholds.
|
||||
This approach guarantees that identical inputs always produce identical outputs (excepting the case of rules involving the `$flag.timestamp`), which is crucial for maintaining a consistent user experience.
|
||||
|
||||
#### MurmurHash3: The chosen algorithm
|
||||
|
||||
flagd specifically employs **MurmurHash3 (32-bit variant)** for its fractional operator, prioritizing performance and distribution quality over cryptographic security.
|
||||
This non-cryptographic hash function provides excellent performance and good avalanche properties (small input changes produce dramatically different outputs) while maintaining deterministic behavior essential for sticky evaluations.
|
||||
Its wide language implementation ensures identical results across different flagd providers, no matter the language in question.
|
||||
|
||||
#### Bucketing value
|
||||
|
||||
The bucketing value is an optional first value to the operator (it may be a JSONLogic expression, other than an array).
|
||||
This enables targeting based on arbitrary attributes (individual users, companies/tenants, etc).
|
||||
If not specified, the bucketing value is a JSONLogic expression concatenating the `$flagd.flagKey` and the extracted [targeting key](https://openfeature.dev/specification/glossary/#targeting-key) (`targetingKey`) from the context (the inclusion of the flag key prevents users from landing in the same "bucket index" for all flags with the same number of buckets).
|
||||
If the bucketing value does not resolve to a string, or the targeting key is undefined, the evaluation is considered erroneous.
|
||||
|
||||
```json
|
||||
// Default bucketing value
|
||||
{
|
||||
"cat": [
|
||||
{"var": "$flagd.flagKey"},
|
||||
{"var": "targetingKey"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Bucketing strategy implementation
|
||||
|
||||
After retrieving the bucketing value, and hashing it to a [0, 99] range, the algorithm iterates through variants, accumulating their relative weights until finding the bucket containing the hash value.
|
||||
|
||||
```go
|
||||
// Simplified implementation structure
|
||||
hashValue := murmur3Hash(bucketingValue) % 100
|
||||
currentWeight := 0
|
||||
for _, distribution := range variants {
|
||||
currentWeight += (distribution.weight * 100) / sumOfWeights
|
||||
if hashValue < currentWeight {
|
||||
return distribution.variant
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This approach supports flexible weight ratios; weights of [25, 50, 25] translate to 25%, 50%, and 25% distribution respectively as do [1, 2, 1].
|
||||
It's worth noting that the maximum bucket resolution is 1/100, meaning that the maximum ratio between variant distributions is 1:99 (ie: a weight distribution of [1, 100000] behaves the same as [1, 100]).
|
||||
|
||||
#### Format flexibility: Shorthand vs longhand
|
||||
|
||||
flagd provides two syntactic options for defining fractional distributions, balancing simplicity with precision. **Shorthand format** enables equal distribution by specifying variants as single-element arrays (in this case, an equal weight of 1 is automatically assumed):
|
||||
|
||||
```json
|
||||
{
|
||||
"fractional": [
|
||||
["red"],
|
||||
["blue"],
|
||||
["green"]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Longhand format** allows precise weight control through two-element arrays:
|
||||
|
||||
Note that in this example, we've also specified a custom bucketing value.
|
||||
|
||||
```json
|
||||
{
|
||||
"fractional": [
|
||||
{ "var": "email" },
|
||||
["red", 50],
|
||||
["blue", 20],
|
||||
["green", 30]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Consequences
|
||||
|
||||
- Good, because Murmur3 is fast, has good avalanche properties, and we don't need "cryptographic" randomness
|
||||
- Good, because we have flexibility but also simple shorthand
|
||||
- Good, because our bucketing algorithm is relatively stable when new variants are added
|
||||
- Bad, because we only support string bucketing values
|
||||
- Bad, because we don't have bucket resolution finer than 1:99
|
||||
- Bad, because we don't support JSONLogic expressions within bucket definitions
|
|
@ -1,157 +0,0 @@
|
|||
---
|
||||
status: rejected
|
||||
author: @alexandraoberaigner
|
||||
created: 2025-05-28
|
||||
updated: -
|
||||
---
|
||||
|
||||
# Add support for dynamic usage of Flag Sets to `flagd`
|
||||
|
||||
⚠️ REJECTED IN FAVOR OF <https://github.com/open-feature/flagd/blob/main/docs/architecture-decisions/duplicate-flag-keys.md> ⚠️
|
||||
|
||||
The goal of this decision document is to establish flag sets as a first class concept in `flagd`, and support the dynamic addition/update/removal of flag sets at runtime.
|
||||
|
||||
## Background
|
||||
|
||||
`flagd` is a language-agnostic feature flagging engine that forms a core part of the OpenFeature ecosystem.
|
||||
Flag configurations can be stored in different locations, so-called `sources`. These are specified at startup, e.g.:
|
||||
|
||||
````shell
|
||||
flagd start \
|
||||
--port 8013 \
|
||||
--uri file:etc/flagd/my-flags-1.json \
|
||||
--uri https://my-flags-2.com/flags
|
||||
````
|
||||
|
||||
The primary objective here is to remove the coupling between sources and "logical" groups of flags, so that providers aren't required to know that their set of flags is sourced from a file/http resource, etc., but could instead just supply a logical identifier for their flag set.
|
||||
|
||||
## Requirements
|
||||
|
||||
* Should enable the dynamic usage of flag sets as logical identifiers.
|
||||
* Should support configurations without flag sets.
|
||||
* Should adhere to existing OpenFeature and flagd terminology and concepts
|
||||
|
||||
## Considered Options
|
||||
|
||||
1. Addition of flag set support in the [flags schema](https://flagd.dev/reference/schema/#targeting) and associated enhancements to `flagd` storage layer
|
||||
2. Support for dynamically adding/removing flag sources through some kind of runtime configuration API
|
||||
3. Support for dynamically adding/removing flag sources through some kind of "discovery" protocol or endpoint (ie: point flagd at a resource that would enumerate a mutable collection of secondary resources which represent flag sets)
|
||||
|
||||
## Proposal
|
||||
|
||||
To support the dynamic usage of flag sets we propose to adapt the flag schema & storage layer in `flagd`.
|
||||
The changes will decouple flag sets from flag sources by supporting multiple flag sets within single flag sources.
|
||||
Dynamic updates to flag sources is already a feature of `flagd`.
|
||||
|
||||
### New Schema Structure
|
||||
|
||||
The proposed changes to the current flagd schema would allow the following json structure for **sources**:
|
||||
|
||||
````json
|
||||
{
|
||||
"$schema": "https://flagd.dev/schema/v1/flagsets.json",
|
||||
"flagSets": {
|
||||
"my-project-1": {
|
||||
"metadata": {
|
||||
...
|
||||
},
|
||||
"flags": {
|
||||
"my-flag-1": {
|
||||
"metadata": {
|
||||
...
|
||||
},
|
||||
...
|
||||
},
|
||||
...
|
||||
},
|
||||
"$evaluators": {
|
||||
...
|
||||
}
|
||||
},
|
||||
"my-project-2": {
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
````
|
||||
|
||||
We propose to introduce a 3rd json schema `flagSets.json`, which references `flags.json`:
|
||||
|
||||
1. flagSets.json (new)
|
||||
2. flags.json
|
||||
3. targeting.json
|
||||
|
||||
We don't want to support merging of flag sets, due to implementation efforts & potential confusing behaviour of the
|
||||
merge strategy.
|
||||
Therefore, we propose for the initial implementation, `flagSetId`s must be unique across different sources or the configuration is considered invalid.
|
||||
In the future, it might be useful to support and implement multiple "strategies" for merging flagSets from different sources, but that's beyond the scope of this proposal.
|
||||
|
||||
### New Data Structure
|
||||
|
||||
The storage layer in `flagd` requires refactoring to better support multiple flag sets within one source.
|
||||
|
||||
````go
|
||||
package store
|
||||
|
||||
type State struct {
|
||||
FlagSets map[string]FlagSet `json:"flagSets"` // key = flagSetId
|
||||
}
|
||||
|
||||
type FlagSet struct {
|
||||
Flags map[string]model.Flag `json:"flags"` // key = flagKey
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Flag struct {
|
||||
State string `json:"state"`
|
||||
DefaultVariant string `json:"defaultVariant"`
|
||||
Variants map[string]any `json:"variants"`
|
||||
Targeting json.RawMessage `json:"targeting,omitempty"`
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type Metadata = map[string]interface{}
|
||||
````
|
||||
|
||||
### OpenFeature Provider Implications
|
||||
|
||||
Currently, creating a new flagd provider can look like follows:
|
||||
|
||||
````java
|
||||
final FlagdProvider flagdProvider =
|
||||
new FlagdProvider(FlagdOptions.builder()
|
||||
.resolverType(Config.Evaluator.IN_PROCESS)
|
||||
.host("localhost")
|
||||
.port(8015)
|
||||
.selector("myFlags.json")
|
||||
.build());
|
||||
````
|
||||
|
||||
* With the proposed solution the `flagSetId` should be passed to the builder as selector argument instead of the source.
|
||||
* `null` is now a valid selector value, referencing flags which do not belong to a flag set. The default/fallback `flagSetId` should be `null`.
|
||||
|
||||
### Consequences
|
||||
|
||||
* Good, because it decouples flag sets from the sources
|
||||
* Good, because we will refactor the flagd storage layer (which is currently storing duplicate data & difficult to
|
||||
understand)
|
||||
* Good, because we can support backwards compatibility with the v0 schema
|
||||
* Good, because the "null" flag set is logically treated as any other flag set, reducing overall implementation complexity.
|
||||
* Bad, because there's additional complexity to support this new config schema as well as the current.
|
||||
* Bad, because this is a breaking change in the behavior of the `selector` member.
|
||||
|
||||
### Other Options
|
||||
|
||||
We evaluated the [mentioned options](#considered-options) as follows: for _options 2 + 3 (support for dynamically adding/removing flag sources)_, we decided against them because they require much more implementation effort than _option 1_. Required changes include:
|
||||
|
||||
* flagd/core/sync: dynamic mode, which allows specifying the sync type that should be added/removed at runtime
|
||||
* flagd/flagd: startup dynamic sync configuration
|
||||
* make sure to still support static syncs
|
||||
|
||||
## More Information
|
||||
|
||||
* Current flagd schema: [flags.json](https://flagd.dev/schema/v0/flags.json)
|
||||
* flagd storage layer
|
||||
implementation: [store/flags.go](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go)
|
||||
* [flagd GitHub Repository](https://github.com/open-feature/flagd)
|
||||
* [OpenFeature Project Overview](https://openfeature.dev/)
|
|
@ -1,77 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: Dave Josephsen
|
||||
created: 2025-05-21
|
||||
updated: 2025-05-21
|
||||
---
|
||||
|
||||
# ADR: Multiple Sync Sources
|
||||
|
||||
It is the intent of this document to articulate our rationale for supporting multiple flag synchronization sources (grpc, http, blob, local file, etc.) as a core design property of flagd. This document also includes a short discussion of how flagd is engineered to enable the community to extend it to support new sources in the future; "future proofing" the runtime against sources that don't yet exist, or those we may have omitted, is a requisite byproduct of this architectural decision.
|
||||
|
||||
The goal of first-class multi-sync support generally is to broaden flagd's potential to suit the needs of many different types of users or architecture. By decoupling flag persistence from the runtime, flagd can focus on evaluation and sync, while enabling its user-base to choose a persistence layer that best suits their individual requirements.
|
||||
|
||||
## Background
|
||||
|
||||
The flagd daemon is a feature flag evaluation engine that forms a core part of the OpenFeature ecosystem as a production-grade reference implementation. Unlike OpenFeature SDK components, which are, by design, agnostic to the specifics of flag structure, evaluation, and persistence, flagd must take an opinionated stance about how feature-flags look, feel, and act.
|
||||
What schema best describes a flag? How should they be evaluated? And in what sort of persistence layer should they be stored?
|
||||
|
||||
This latter-most question -- _how should they be stored_ -- is the most opaque design detail of every commercial flagging product from the perspective of its end-users.
|
||||
As a front-line engineer using a commercial flagging product, I may, for example, be exposed to flag schema by the product's SDK, and become familiar with its evaluation intricacies over time as my needs grow to require advanced features, or as I encounter surprising behavior. Rarely, however, is an end-user exposed to the details of a commercial product's storage backend.
|
||||
The SaaS vendor is expected to engineer a safe, fast, multi-tenant storage back-end, optimized for its flag schema and operational parameters, and insulate the customer from these details via its SDK.
|
||||
This presents Flagd, an open-source evaluation engine, with an interesting conundrum: what sort of flag storage best suits the needs of its potential user-base (which is everyone)?
|
||||
|
||||
## Requirements
|
||||
|
||||
* Support the storage technology that's most likely to meet the needs of current Flagd user-base (Don't be weird. Don't be surprising.)
|
||||
* Make it "easy" to extend the flagd runtime to support "new" storage systems
|
||||
* Horizontally scalable persistence layer
|
||||
* Minimize end-user exposure to persistence "quirks" (replication schemes, leader election, back-end scaling, consistency minutia, etc.. )
|
||||
* Reliable, Fast, Transparent
|
||||
* Full CRUD, read-optimized.
|
||||
|
||||
## Considered Options
|
||||
|
||||
* Be super-opinionated and prescribe a built-in raftesque key-value setup, analogous to the designs of k8s and kafka, which prescribe etcd and zookeeper respectively.
|
||||
* Roll a single "standard interface" for flag sync (published grpc spec or similar) (??)
|
||||
* Decouple storage from flagd entirely, by exposing a Golang interface type that "providers" can implement to provide support for any data store.
|
||||
|
||||
## Proposal
|
||||
<!--
|
||||
Unsure whether we want a diagram in this section or not. Happy to add one if we want one.
|
||||
-->
|
||||
The solution to the conundrum posited in the background section of this document is to decouple flag storage entirely from the rest of the runtime, including instead support for myriad commonly used data synchronization interfaces.
|
||||
This allows Flagd to be agnostic to flag storage, while enabling users to use whichever storage back-end best suits their environment.
|
||||
|
||||
To extend Flagd to support a new storage back-end, _sync providers_ implement the _ISync_ interface, detailed below:
|
||||
|
||||
```go
|
||||
type ISync interface {
|
||||
// Init is used by the sync provider to initialize its data structures and external dependencies.
|
||||
Init(ctx context.Context) error
|
||||
|
||||
// Sync is the contract between Runtime and sync implementation.
|
||||
// Note that, it is expected to return the first data sync as soon as possible to fill the store.
|
||||
Sync(ctx context.Context, dataSync chan<- DataSync) error
|
||||
|
||||
// ReSync is used to fetch the full flag configuration from the sync
|
||||
// This method should trigger an ALL sync operation then exit
|
||||
ReSync(ctx context.Context, dataSync chan<- DataSync) error
|
||||
|
||||
// IsReady shall return true if the provider is ready to communicate with the Runtime
|
||||
IsReady() bool
|
||||
}
|
||||
```
|
||||
|
||||
Synchronization events "fan-in" from all configured sync providers to flagd's in-memory state-store via a channel carrying [`sync.DataSync`](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go#L19) events.
|
||||
These events detail the source and type of the change, along with the flag data in question and are merged into the currently held state by the [store](https://github.com/open-feature/flagd/blob/main/core/pkg/store/flags.go#L19).
|
||||
|
||||
### Consequences
|
||||
|
||||
Because synchronization providers may vary wildly with respect to their implementation details, supporting multiple sync providers means supporting custom configuration parameters for each provider.
|
||||
As a consequence, Flagd's configuration is itself made more complex, and its bootstrap process, whose goal is to create a [`runtime.Runtime`](https://github.com/open-feature/flagd/blob/main/flagd/pkg/runtime/runtime.go#L21) object from user-provided configuration, spends the preponderance of its time and effort interpreting, configuring, and initializing sync providers.
|
||||
There is, in fact, a custom bootstrap type, called the `syncbuilder` whose job is to bootstrap sync providers and arrange them into a map, for the runtime to use.
|
||||
|
||||
Further, because sync providers may vary wildly with respect to implementation, the end-user's choice of sync sources can change Flagd's operational parameters. For example, end-users who choose the GRPC provider can expect flag-sync operations to be nearly immediate, because GRPC updates can be pushed to flagd as they occur, compared with end-users who choose the HTTP provider, who must wait for a timer to expire in order to notice updates, because HTTP is a polling-based implementation.
|
||||
|
||||
Finally, sync Providers also contribute a great deal of girth to flagd's documentation, because again, their setup, syntax, and runtime idiosyncrasies may differ wildly.
|
|
@ -1,211 +0,0 @@
|
|||
---
|
||||
status: accepted
|
||||
author: @beeme1mr
|
||||
created: 2025-06-06
|
||||
updated: 2025-06-20
|
||||
---
|
||||
|
||||
# Support Explicit Code Default Values in flagd Configuration
|
||||
|
||||
This ADR proposes adding support for explicitly configuring flagd to use code-defined default values by allowing `null` as a valid default variant. This change addresses the current limitation where users cannot differentiate between "use the code's default" and "use this configured default" without resorting to workarounds like misconfigured rulesets.
|
||||
|
||||
## Background
|
||||
|
||||
Currently, flagd requires a default variant to be specified in flag configurations. This creates a fundamental mismatch with the OpenFeature specification and common feature flag usage patterns where code-defined defaults serve as the ultimate fallback.
|
||||
|
||||
The current behavior leads to confusion and operational challenges:
|
||||
|
||||
1. **Two Sources of Truth**: Applications have default values defined in code (as per OpenFeature best practices), while flagd configurations require their own default variants. This dual-default pattern violates the principle of single source of truth.
|
||||
|
||||
2. **State Transition Issues**: When transitioning a flag from DISABLED to ENABLED state, the behavior changes unexpectedly:
|
||||
- DISABLED state: Flag evaluation falls through to code defaults
|
||||
- ENABLED state: Flag evaluation uses the configured default variant
|
||||
|
||||
3. **Workarounds**: Users resort to misconfiguring rulesets (e.g., returning invalid variants) to force fallback to code defaults, which generates confusing error states and complicates debugging.
|
||||
|
||||
4. **OpenFeature Alignment**: The OpenFeature specification emphasizes that code defaults should be the ultimate fallback, but flagd's current design doesn't provide a clean way to express this intent.
|
||||
|
||||
Related discussions and context can be found in the [OpenFeature specification](https://openfeature.dev/specification/types) and [flagd flag definitions reference](https://flagd.dev/reference/flag-definitions/).
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Explicit Code Default Support**: Users must be able to explicitly configure a flag to use the code-defined default value as its resolution
|
||||
- **Backward Compatibility**: Existing flag configurations must continue to work without modification
|
||||
- **Clear Semantics**: The configuration must clearly indicate when code defaults are being used versus configured defaults
|
||||
- **Appropriate Reason Codes**: Resolution details must include appropriate reason codes when code defaults are used (e.g., `DEFAULT` or a new specific reason)
|
||||
- **Schema Validation**: JSON schema must support and validate the new configuration options
|
||||
- **Provider Compatibility**: All OpenFeature providers must handle the new behavior correctly
|
||||
- **Testbed Coverage**: flagd testbed must include test cases for the new functionality
|
||||
|
||||
## Considered Options
|
||||
|
||||
- **Option 1: Allow `null` as Default Variant** - Modify the schema to accept `null` as a valid value for defaultVariant, signaling "use code default"
|
||||
- **Option 2: Make Default Variant Optional** - Remove the requirement for defaultVariant entirely, with absence meaning "use code default"
|
||||
- **Option 3: Special Variant Value** - Define a reserved variant name (e.g., `"__CODE_DEFAULT__"`) that signals code default usage
|
||||
- **Option 4: New Configuration Property** - Add a new property like `useCodeDefault: true` alongside or instead of defaultVariant
|
||||
- **Option 5: Status Quo with Documentation** - Keep current behavior but improve documentation about workarounds
|
||||
|
||||
## Proposal
|
||||
|
||||
We propose implementing **Option 1: Allow `null` as Default Variant**, potentially combined with **Option 2: Make Default Variant Optional** for maximum flexibility.
|
||||
|
||||
The implementation leverages field presence in evaluation responses across all protocols (in-process, RPC, and OFREP). When a flag configuration has `defaultVariant: null`, the evaluation response omits the value field entirely, which serves as a programmatic signal to the client to use its code-defined default value.
|
||||
|
||||
This approach offers several key advantages:
|
||||
|
||||
1. **No Protocol Changes**: RPC and OFREP protocols remain unchanged
|
||||
2. **Clear Semantics**: Omitted value field = "use your code default"
|
||||
3. **Backward Compatible**: Existing clients and servers continue to work
|
||||
4. **Universal Pattern**: Works consistently across all evaluation modes
|
||||
|
||||
The absence of a value field provides an unambiguous signal that distinguishes between "the server evaluated to null/false/empty" (value field present) and "the server delegates to your code default" (value field absent).
|
||||
|
||||
### Implementation Details
|
||||
|
||||
1. **Schema Changes**:
|
||||
|
||||
```json
|
||||
{
|
||||
"defaultVariant": {
|
||||
"oneOf": [
|
||||
{ "type": "string" },
|
||||
{ "type": "null" }
|
||||
],
|
||||
"description": "Default variant to use. Set to null to use code-defined default."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. **Evaluation Behavior**:
|
||||
- When flag has `defaultVariant: null` and targeting returns no match
|
||||
- Server responds with reason "ERROR" and error code "FLAG_NOT_FOUND"
|
||||
- Client detects this error response and uses its code-defined default
|
||||
- This same pattern works across all evaluation modes
|
||||
|
||||
3. **Provider Implementation**:
|
||||
- No changes to existing providers
|
||||
|
||||
### Design Rationale
|
||||
|
||||
**Using "ERROR" reason**: We intentionally reuse the existing "ERROR" reason code rather than introducing a new one (like "CODE_DEFAULT"). This retains the current behavior of a disabled flag and allows for progressive enablement of a flag without unexpected variations in flag evaluation behavior.
|
||||
|
||||
Advantages of this approach:
|
||||
|
||||
- The "ERROR" reason is already used for cases where the flag is not found or misconfigured, so it aligns with the intent of using code defaults.
|
||||
- This approach avoids introducing new reason codes that would require additional handling in providers and clients.
|
||||
|
||||
### API changes
|
||||
|
||||
**Flag Configuration**:
|
||||
|
||||
```yaml
|
||||
flags:
|
||||
my-feature:
|
||||
state: ENABLED
|
||||
defaultVariant: null # Explicitly use code default
|
||||
variants:
|
||||
on: true
|
||||
off: false
|
||||
targeting:
|
||||
if:
|
||||
- "===":
|
||||
- var: user-type
|
||||
- "beta"
|
||||
- on
|
||||
```
|
||||
|
||||
**OFREP Response** when code default is indicated:
|
||||
|
||||
#### Single flag evaluation response
|
||||
|
||||
A single flag evaluation returns a `404` status code.
|
||||
|
||||
```json
|
||||
{
|
||||
"key": "my-feature",
|
||||
"errorCode": "FLAG_NOT_FOUND",
|
||||
// Optional error details
|
||||
"errorDetails": "Targeting not matched, using code default",
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk flag evaluation response
|
||||
|
||||
```json
|
||||
{
|
||||
"flags": [
|
||||
// Flag is omitted from bulk response
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**flagd RPC Response** (ResolveBooleanResponse):
|
||||
|
||||
```protobuf
|
||||
{
|
||||
"reason": "ERROR",
|
||||
"errorCode": "FLAG_NOT_FOUND",
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Consequences
|
||||
|
||||
- Good, because it eliminates the confusion between code and configuration defaults
|
||||
- Good, because it provides explicit control over default behavior without workarounds
|
||||
- Good, because it aligns flagd more closely with OpenFeature specification principles
|
||||
- Good, because it supports gradual flag rollout patterns more naturally
|
||||
- Good, because it provides the ability to delegate to whatever is defined in code
|
||||
- Good, because it requires no changes to existing RPC or protocol signatures
|
||||
- Good, because it uses established patterns (field presence) for clear semantics
|
||||
- Good, because it maintains full backward compatibility
|
||||
- Bad, because it requires updates across multiple components (flagd, providers, testbed)
|
||||
- Bad, because it introduces a new concept that users need to understand
|
||||
- Neutral, because existing configurations continue to work unchanged
|
||||
|
||||
### Implementation Plan
|
||||
|
||||
1. Update flagd-schemas with new JSON schema supporting null default variants
|
||||
2. Update flagd-testbed with comprehensive test cases for all evaluation modes
|
||||
3. Implement core logic in flagd to handle null defaults and omit value/variant fields
|
||||
4. Update OpenFeature providers with the latest schema and test harness to ensure they handle the new behavior correctly
|
||||
5. Documentation updates, migration guides, and playground examples to demonstrate the new configuration options
|
||||
|
||||
### Testing Considerations
|
||||
|
||||
To ensure correct implementation across all components:
|
||||
|
||||
1. **Provider Tests**: Each component (flagd, providers) must have unit tests verifying the handling of `null` as a default variant
|
||||
2. **Integration Tests**: End-to-end tests across different language combinations (e.g., Go flagd with Java provider)
|
||||
3. **OFREP Tests**: Verify JSON responses correctly omit flags with a `null` default variant
|
||||
4. **Backward Compatibility Tests**: Ensure old providers handle new responses gracefully
|
||||
5. **Consistency Tests**: Verify identical behavior across in-process, RPC, and OFREP modes
|
||||
|
||||
### Open questions
|
||||
|
||||
- How should providers handle responses with missing value fields in strongly-typed languages?
|
||||
- We'll handle the same way as with optional fields, using language-specific patterns (e.g., pointers in Go, `hasValue()` in Java).
|
||||
- Should we support both `null` and absent `defaultVariant` fields, or choose one approach?
|
||||
- Yes, we'll support both `null` and absent fields to maximize flexibility. An absent `defaultVariant` will be the equivalent of `null`.
|
||||
- What migration path should we recommend for users currently using workarounds?
|
||||
- Update the flag configurations to use `defaultVariant: null` and remove any misconfigured rulesets that force code defaults.
|
||||
- Should this feature be gated behind a configuration flag during initial rollout?
|
||||
- We'll avoid public facing documentation until the feature is fully implemented and tested.
|
||||
- How do we ensure consistent behavior across all provider implementations?
|
||||
- Gherkin tests will be added to the flagd testbed to ensure all providers handle the new behavior consistently.
|
||||
- Should providers validate that the reason is "DEFAULT" when value is omitted, or accept any omitted value as delegation?
|
||||
- Providers should accept any omitted value as delegation.
|
||||
- How do we handle edge cases where network protocols might strip empty fields?
|
||||
- It would behave as expected, as the absence of fields is the intended signal.
|
||||
- When the client uses its code default after receiving a delegation response, what variant should be reported in telemetry/analytics?
|
||||
- The variant will be omitted, indicating that the code default was used.
|
||||
- Should we add explicit proto comments documenting the field omission behavior?
|
||||
- Leave this to the implementers, but it would be beneficial to add comments in the proto files to clarify this behavior for future maintainers.
|
||||
|
||||
## More Information
|
||||
|
||||
- [OpenFeature Specification - Flag Evaluation](https://openfeature.dev/specification/types#flag-evaluation)
|
||||
- [flagd Flag Definitions Reference](https://flagd.dev/reference/flag-definitions/)
|
||||
- [flagd JSON Schema Repository](https://github.com/open-feature/flagd-schemas)
|
||||
- [flagd Testbed](https://github.com/open-feature/flagd-testbed)
|
|
@ -33,7 +33,7 @@ erDiagram
|
|||
|
||||
### In-Process evaluation
|
||||
|
||||
In-process deployments embed the flagd evaluation engine directly into the client application through the use of an [in-process provider](./providers/index.md).
|
||||
In-process deployments embed the flagd evaluation engine directly into the client application through the use of an [in-process provider](./installation.md#in-process).
|
||||
The in-process provider is connected via the sync protocol to an implementing [gRPC service](./concepts/syncs.md#grpc-sync) that provides the flag definitions.
|
||||
You can use flagd as a [gRPC sync service](./reference/grpc-sync-service.md).
|
||||
In this mode, the flag sync stream will expose aggregated flag configurations currently configured through [syncs](./concepts/syncs.md).
|
||||
|
|
|
@ -22,4 +22,3 @@ Below is a non-exhaustive table of common feature flag use-cases, and how flagd
|
|||
| dynamic (context-sensitive) evaluation | flagd evaluations are context sensitive. Rules can use arbitrary context attributes as inputs for flag evaluation logic. |
|
||||
| fractional evaluation / random assignment | flagd's [fractional](../reference/custom-operations/fractional-operation.md) custom operation supports pseudorandom assignment of flag values. |
|
||||
| progressive roll-outs | Progressive roll-outs of new features can be accomplished by leveraging the [fractional](../reference/custom-operations/fractional-operation.md) custom operation as well as automation in your build pipeline, SCM, or infrastructure which updates the distribution over time. |
|
||||
| feature flag telemetry | flagd supports the OpenTelemetry conventions for feature flags, by returning compliant [resolution details](https://openfeature.dev/specification/types#resolution-details) and [metadata](../reference/monitoring.md#metadata), in addition to flag values. |
|
||||
|
|
|
@ -26,19 +26,14 @@ See [sync source](../reference/sync-configuration.md#source-configuration) confi
|
|||
The HTTP sync provider fetches flags from a remote source and periodically polls the source for flag definition updates.
|
||||
|
||||
```shell
|
||||
flagd start --uri https://my-flag-source/flags.json
|
||||
flagd start --uri https://my-flag-source.json
|
||||
```
|
||||
|
||||
In this example, `https://my-flag-source/flags.json` is a remote endpoint responding with a valid feature flag definition when
|
||||
In this example, `https://my-flag-source.json` is a remote endpoint responding with a valid feature flag definition when
|
||||
invoked with **HTTP GET** request.
|
||||
The polling interval, port, TLS settings, and authentication information can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
To optimize network usage, it honors the HTTP ETag protocol: if the server includes an `ETag` header in its response,
|
||||
flagd will store this value and send it in the `If-None-Match` header on subsequent requests. If the flag data has
|
||||
not changed, the server responds with 304 Not Modified, and flagd will skip updating its state. If the data has
|
||||
changed, the server returns the new content and a new ETag, prompting flagd to update its flags.
|
||||
|
||||
---
|
||||
|
||||
### gRPC sync
|
||||
|
@ -77,7 +72,7 @@ See [sync source](../reference/sync-configuration.md#source-configuration) confi
|
|||
|
||||
### GCS sync
|
||||
|
||||
The GCS sync provider fetches flags from a GCS blob and periodically polls the GCS for the flag definition updates.
|
||||
The GCS sync provider fetches flags from a GCS blob and periodically poll the GCS for the flag definition updates.
|
||||
It uses [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) if they
|
||||
are [configured](https://cloud.google.com/docs/authentication/provide-credentials-adc) to authorize the calls to GCS.
|
||||
|
||||
|
@ -90,36 +85,6 @@ In this example, `gs://my-bucket/my-flags.json` is expected to be a valid GCS UR
|
|||
The polling interval can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
### Azure Blob sync
|
||||
|
||||
The Azure Blob sync provider fetches flags from an Azure Blob Storage blob and periodically polls the blob for the flag definition updates.
|
||||
It uses [environment variables](https://pkg.go.dev/gocloud.dev/blob/azureblob#hdr-URLs) to set the Storage Account name and to
|
||||
authorize the calls to Azure Blob Storage.
|
||||
|
||||
```shell
|
||||
flagd start --uri azblob://my-container/my-flags.json
|
||||
```
|
||||
|
||||
In this example, assuming the environment variable AZURE_STORAGE_ACCOUNT is set to `myaccount`, and other options are not set, the service URL will be:
|
||||
`https://myaccount.blob.core.windows.net/my-container/my-flags.json`.
|
||||
This is expected to be a valid service URL accessible by flagd (either by being public or together with environment variable credentials).
|
||||
The polling interval can be configured.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) configuration for details.
|
||||
|
||||
### S3 sync
|
||||
|
||||
The S3 sync provider fetches flags from an S3 bucket and periodically polls for flag definition updates.
|
||||
It uses [AWS standardized credentials chain](https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html) to authorize the calls to AWS.
|
||||
|
||||
```shell
|
||||
flagd start --uri s3://my-bucket/my-flags.json
|
||||
```
|
||||
|
||||
In this example, `s3://my-bucket/my-flags.json` is expected to be a valid URI accessible by flagd
|
||||
(either by being public or together with the appropriate credentials read from a file or via the environment as described in the AWS docs linked above).
|
||||
The polling interval is configurable.
|
||||
See [sync source](../reference/sync-configuration.md#source-configuration) for details.
|
||||
|
||||
## Merging
|
||||
|
||||
Flagd can be configured to read from multiple sources at once, when this is the case flagd will merge all flag definitions into a single
|
||||
|
|
21
docs/faq.md
21
docs/faq.md
|
@ -33,27 +33,6 @@ Please see [architecture](./architecture.md) and [installation](./installation.m
|
|||
|
||||
---
|
||||
|
||||
> How can I access the SBOM for flagd?
|
||||
|
||||
SBOMs for the flagd binary are available as assets on the [GitHub release page](https://github.com/open-feature/flagd/releases).
|
||||
Container SBOMs can be inspected using the Docker CLI.
|
||||
|
||||
An example of inspecting the SBOM for the latest flagd `linux/amd64` container image:
|
||||
|
||||
```shell
|
||||
docker buildx imagetools inspect ghcr.io/open-feature/flagd:latest \
|
||||
--format '{{ json (index .SBOM "linux/amd64").SPDX }}'
|
||||
```
|
||||
|
||||
An example of inspecting the SBOM for the latest flagd `linux/arm64` container image:
|
||||
|
||||
```shell
|
||||
docker buildx imagetools inspect ghcr.io/open-feature/flagd:latest \
|
||||
--format '{{ json (index .SBOM "linux/arm64").SPDX }}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
> Why doesn't flagd support {_my desired feature_}?
|
||||
|
||||
Because you haven't opened a PR or created an issue!
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -14,14 +14,13 @@ Providers for flagd come in two flavors: those that are built to communicate wit
|
|||
The following table lists all the available flagd providers.
|
||||
|
||||
| Technology | RPC | in-process |
|
||||
| --------------------------------------------------- | ---------------- | ---------------- |
|
||||
| ------------------------------------------------------------- | ---------------- | ---------------- |
|
||||
| :fontawesome-brands-golang: [Go](./go.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-java: [Java](./java.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-node-js: [Node.JS](./nodejs.md) | :material-check: | :material-check: |
|
||||
| :simple-php: [PHP](./php.md) | :material-check: | :material-close: |
|
||||
| :simple-dotnet: [.NET](./dotnet.md) | :material-check: | :material-check: |
|
||||
| :simple-python: [Python](./python.md) | :material-check: | :material-check: |
|
||||
| :fontawesome-brands-rust: [Rust](./rust.md) | :material-check: | :material-check: |
|
||||
| :simple-python: [Python](./python.md) | :material-check: | :material-close: |
|
||||
| :material-web: [Web](./web.md) | :material-check: | :material-close: |
|
||||
|
||||
For information on implementing a flagd provider, see the [specification](../reference/specifications/providers.md).
|
||||
For information on implementing a flagd provider, see the specifications for [RPC](../reference/specifications/rpc-providers.md) and [in-process](../reference/specifications/in-process-providers.md) providers.
|
|
@ -5,5 +5,4 @@
|
|||
{%
|
||||
include "https://raw.githubusercontent.com/open-feature/python-sdk-contrib/main/providers/openfeature-provider-flagd/README.md"
|
||||
start="## Installation"
|
||||
end="## License"
|
||||
%}
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
# Rust provider
|
||||
|
||||
## Installation
|
||||
|
||||
{%
|
||||
include "https://raw.githubusercontent.com/open-feature/rust-sdk-contrib/refs/heads/main/crates/flagd/README.md"
|
||||
start="### Installation"
|
||||
end="### License"
|
||||
%}
|
|
@ -25,59 +25,24 @@ These types of feature flags are commonly used to gate access to a new feature u
|
|||
The second flag has the key `background-color` and is a multi-variant string.
|
||||
These are commonly used for A/B/(n) testing and experimentation.
|
||||
|
||||
### Running Flagd
|
||||
### Start flagd
|
||||
|
||||
=== "Docker"
|
||||
Run the following command to start flagd using docker. This will expose flagd on port `8013` and read from the `demo.flagd.json` file we downloaded in the previous step.
|
||||
|
||||
Run the following command to start flagd using docker. This will expose flagd on port `8013` and read from the `demo.flagd.json` file we downloaded in the previous step.
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
```shell
|
||||
docker run \
|
||||
--rm -it \
|
||||
--name flagd \
|
||||
-p 8013:8013 \
|
||||
-v $(pwd):/etc/flagd \
|
||||
ghcr.io/open-feature/flagd:latest start \
|
||||
--uri file:./etc/flagd/demo.flagd.json
|
||||
```
|
||||
```
|
||||
|
||||
??? "Tips for Windows users"
|
||||
??? "Tips for Windows users"
|
||||
In Windows, use WSL system for both the file location and Docker runtime.
|
||||
Mixed file systems do not work and this is a [limitation of Docker](https://github.com/docker/for-win/issues/8479).
|
||||
|
||||
=== "Docker Compose"
|
||||
|
||||
Create a docker-compose.yaml file with the following contents:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
flagd:
|
||||
image: ghcr.io/open-feature/flagd:latest
|
||||
volumes:
|
||||
- ./flags:/flags
|
||||
command: [
|
||||
'start',
|
||||
'--uri',
|
||||
'file:./flags/demo.flagd.json',
|
||||
]
|
||||
ports:
|
||||
- '8013:8013'
|
||||
```
|
||||
|
||||
Create a folder called `flags` where the JSON flag files can reside. [Download the flag definition](#download-the-flag-definition) and move this JSON file to the flags folder.
|
||||
|
||||
```text
|
||||
├── flags
|
||||
│ ├── demo.flagd.json
|
||||
├── docker-compose.yaml
|
||||
```
|
||||
|
||||
Open up a terminal and run the following:
|
||||
|
||||
```shell
|
||||
docker compose up
|
||||
```
|
||||
|
||||
### Evaluating a feature flag
|
||||
|
||||
Test it out by running the following cURL command in a separate terminal:
|
||||
|
|
|
@ -64,9 +64,7 @@ The `defaultVariant` is `red`, but it contains a [targeting rule](../flag-defini
|
|||
In this case, `25%` of the evaluations will receive `red`, `25%` will receive `blue`, and so on.
|
||||
|
||||
Assignment is deterministic (sticky) based on the expression supplied as the first parameter (`{ "cat": [{ "var": "$flagd.flagKey" }, { "var": "email" }]}`, in this case).
|
||||
The value retrieved by this expression is referred to as the "bucketing value" and must be a string.
|
||||
Other primitive types can be used by casting the value using `"cat"` operator.
|
||||
For example, a less deterministic distribution can be achieved using `{ "cat": [{ "var": "$flagd.timestamp" }]}`.
|
||||
The value retrieved by this expression is referred to as the "bucketing value".
|
||||
The bucketing value expression can be omitted, in which case a concatenation of the `targetingKey` and the `flagKey` will be used.
|
||||
|
||||
The `fractional` operation is a custom JsonLogic operation which deterministically selects a variant based on
|
||||
|
|
|
@ -56,15 +56,8 @@ A fully configured flag may look like this.
|
|||
"on",
|
||||
"off"
|
||||
]
|
||||
},
|
||||
"metadata": {
|
||||
"version": "17"
|
||||
}
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"team": "user-experience",
|
||||
"flagSetId": "ecommerce"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -163,7 +156,7 @@ Example of an invalid configuration:
|
|||
`targeting` is an **optional** property.
|
||||
A targeting rule **must** be valid JSON.
|
||||
Flagd uses a modified version of [JsonLogic](https://jsonlogic.com/), as well as some custom pre-processing, to evaluate these rules.
|
||||
If no targeting rules are defined, the response reason will always be `STATIC`, this allows for the flag values to be cached, this behavior is described [here](specifications/providers.md#flag-evaluation-caching).
|
||||
If no targeting rules are defined, the response reason will always be `STATIC`, this allows for the flag values to be cached, this behavior is described [here](specifications/rpc-providers.md#caching).
|
||||
|
||||
#### Variants Returned From Targeting Rules
|
||||
|
||||
|
@ -191,9 +184,6 @@ For example, when accessing flagd via HTTP, the POST body may look like this:
|
|||
|
||||
The evaluation context can be accessed in targeting rules using the `var` operation followed by the evaluation context property name.
|
||||
|
||||
The evaluation context can be appended by arbitrary key value pairs
|
||||
via the `-X` command line flag.
|
||||
|
||||
| Description | Example |
|
||||
| -------------------------------------------------------------- | ---------------------------------------------------- |
|
||||
| Retrieve property from the evaluation context | `#!json { "var": "email" }` |
|
||||
|
@ -346,13 +336,6 @@ Example:
|
|||
}
|
||||
```
|
||||
|
||||
## Metadata
|
||||
|
||||
Metadata can be defined at both the flag set (as a sibling of [flags](#flags)) and within each flag.
|
||||
Flag metadata conveys arbitrary information about the flag or flag set, such as a version number, or the business unit that is responsible for the flag.
|
||||
When flagd resolves flags, the returned [flag metadata](https://openfeature.dev/specification/types/#flag-metadata) is a merged representation of the metadata defined in the flag set, and the metadata defined in the flag, with the metadata defined in the flag taking priority.
|
||||
See the [playground](/playground/?scenario-name=Flag+metadata) for an interactive example.
|
||||
|
||||
## Boolean Variant Shorthand
|
||||
|
||||
Since rules that return `true` or `false` map to the variant indexed by the equivalent string (`"true"`, `"false"`), you can use shorthand for these cases.
|
||||
|
|
|
@ -11,10 +11,7 @@ flagd start [flags]
|
|||
### Options
|
||||
|
||||
```
|
||||
-H, --context-from-header stringToString add key-value pairs to map header values to context values, where key is Header name, value is context key (default [])
|
||||
-X, --context-value stringToString add arbitrary key value pairs to the flag evaluation context (default [])
|
||||
-C, --cors-origin strings CORS allowed origins, * will allow all origins
|
||||
--disable-sync-metadata Disables the getMetadata endpoint of the sync service. Defaults to false, but will default to true in later versions.
|
||||
-h, --help help for start
|
||||
-z, --log-format string Set the logging format, e.g. console or json (default "console")
|
||||
-m, --management-port int32 Port for management operations (default 8014)
|
||||
|
@ -28,12 +25,10 @@ flagd start [flags]
|
|||
-p, --port int32 Port to listen on (default 8013)
|
||||
-c, --server-cert-path string Server side tls certificate path
|
||||
-k, --server-key-path string Server side tls key path
|
||||
-d, --socket-path string Flagd unix socket path. With grpc the evaluations service will become available on this address. With http(s) the grpc-gateway proxy will use this address internally.
|
||||
-d, --socket-path string Flagd socket path. With grpc the service will become available on this address. With http(s) the grpc-gateway proxy will use this address internally.
|
||||
-s, --sources string JSON representation of an array of SourceConfig objects. This object contains 2 required fields, uri (string) and provider (string). Documentation for this object: https://flagd.dev/reference/sync-configuration/#source-configuration
|
||||
--stream-deadline duration Set a server-side deadline for flagd sync and event streams (default 0, means no deadline).
|
||||
-g, --sync-port int32 gRPC Sync port (default 8015)
|
||||
-e, --sync-socket-path string Flagd sync service socket path. With grpc the sync service will be available on this address.
|
||||
-f, --uri .yaml/.yml/.json Set a sync provider uri to read data from, this can be a filepath, URL (HTTP and gRPC), FeatureFlag custom resource, or GCS or Azure Blob. When flag keys are duplicated across multiple providers the merge priority follows the index of the flag arguments, as such flags from the uri at index 0 take the lowest precedence, with duplicated keys being overwritten by those from the uri at index 1. Please note that if you are using filepath, flagd only supports files with .yaml/.yml/.json extension.
|
||||
-f, --uri .yaml/.yml/.json Set a sync provider uri to read data from, this can be a filepath, URL (HTTP and gRPC) or FeatureFlag custom resource. When flag keys are duplicated across multiple providers the merge priority follows the index of the flag arguments, as such flags from the uri at index 0 take the lowest precedence, with duplicated keys being overwritten by those from the uri at index 1. Please note that if you are using filepath, flagd only supports files with .yaml/.yml/.json extension.
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
|
|
@ -47,25 +47,15 @@ Given below is the current implementation overview of flagd telemetry internals,
|
|||
|
||||
flagd exposes the following metrics:
|
||||
|
||||
- `http.server.request.duration` - Measures the duration of inbound HTTP requests
|
||||
- `http.server.response.body.size` - Measures the size of HTTP response messages
|
||||
- `http.server.active_requests` - Measures the number of concurrent HTTP requests that are currently in-flight
|
||||
- `feature_flag.flagd.impression` - Measures the number of evaluations for a given flag
|
||||
- `feature_flag.flagd.result.reason` - Measures the number of evaluations for a given reason
|
||||
- `http.server.duration`
|
||||
- `http.server.response.size`
|
||||
- `http.server.active_requests`
|
||||
- `feature_flag.flagd.impression`
|
||||
- `feature_flag.flagd.evaluation.reason`
|
||||
|
||||
> Please note that metric names may vary based on the consuming monitoring tool naming requirements.
|
||||
> For example, the transformation of OTLP metrics to Prometheus is described [here](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus).
|
||||
|
||||
### HTTP Metric Attributes
|
||||
|
||||
flagd uses the following OpenTelemetry Semantic Conventions for HTTP metrics:
|
||||
|
||||
- `service.name` - The name of the service
|
||||
- `http.route` - The matched route (path template)
|
||||
- `http.request.method` - The HTTP request method (GET, POST, etc.)
|
||||
- `http.response.status_code` - The HTTP response status code
|
||||
- `url.scheme` - The URI scheme (http or https)
|
||||
|
||||
## Traces
|
||||
|
||||
flagd creates the following spans as part of a trace:
|
||||
|
@ -93,8 +83,9 @@ official [OTEL collector example](https://github.com/open-telemetry/opentelemetr
|
|||
|
||||
```yaml
|
||||
services:
|
||||
jaeger:
|
||||
image: cr.jaegertracing.io/jaegertracing/jaeger:2.8.0
|
||||
# Jaeger
|
||||
jaeger-all-in-one:
|
||||
image: jaegertracing/all-in-one:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "16686:16686"
|
||||
|
@ -102,7 +93,7 @@ services:
|
|||
- "14250"
|
||||
# Collector
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector:0.129.1
|
||||
image: otel/opentelemetry-collector:latest
|
||||
restart: always
|
||||
command: [ "--config=/etc/otel-collector-config.yaml" ]
|
||||
volumes:
|
||||
|
@ -115,10 +106,10 @@ services:
|
|||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "55679:55679" # zpages extension
|
||||
depends_on:
|
||||
- jaeger
|
||||
- jaeger-all-in-one
|
||||
prometheus:
|
||||
container_name: prometheus
|
||||
image: prom/prometheus:v2.53.5
|
||||
image: prom/prometheus:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
|
||||
|
@ -137,8 +128,10 @@ receivers:
|
|||
exporters:
|
||||
prometheus:
|
||||
endpoint: "0.0.0.0:8889"
|
||||
const_labels:
|
||||
label1: value1
|
||||
otlp/jaeger:
|
||||
endpoint: jaeger:4317
|
||||
endpoint: jaeger-all-in-one:4317
|
||||
tls:
|
||||
insecure: true
|
||||
processors:
|
||||
|
@ -155,7 +148,7 @@ service:
|
|||
exporters: [ prometheus ]
|
||||
```
|
||||
|
||||
#### prometheus.yaml
|
||||
#### prometheus.yml
|
||||
|
||||
```yaml
|
||||
scrape_configs:
|
||||
|
@ -163,14 +156,8 @@ scrape_configs:
|
|||
scrape_interval: 10s
|
||||
static_configs:
|
||||
- targets: [ 'otel-collector:8889' ]
|
||||
- targets: [ 'otel-collector:8888' ]
|
||||
```
|
||||
|
||||
Once the configuration files are ready, use `docker compose up` to start the local setup. With successful startup, you can
|
||||
Once the configuration files are ready, use `docker-compose up` to start the local setup. With successful startup, you can
|
||||
access metrics through [Prometheus](http://localhost:9090/graph) & traces through [Jaeger](http://localhost:16686/).
|
||||
|
||||
## Metadata
|
||||
|
||||
[Flag metadata](https://openfeature.dev/specification/types/#flag-metadata) comprises auxiliary data pertaining to feature flags; it's highly valuable in telemetry signals.
|
||||
Flag metadata might consist of attributes indicating the version of the flag, an identifier for the flag set, ownership information about the flag, or other documentary information.
|
||||
flagd supports flag metadata in all its [gRPC protocols](../reference/specifications//protos.md), in [OFREP](../reference/flagd-ofrep.md), and in its [flag definitions](./flag-definitions.md#metadata).
|
||||
These attributes are returned with flag evaluations, and can be added to telemetry signals as outlined in the [OpenFeature specification](https://openfeature.dev/specification/appendix-d).
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# OpenFeature Operator
|
||||
|
||||
The OpenFeature Operator provides a convenient way to use flagd in your Kubernetes cluster.
|
||||
The OpenFeature Operator provides a convent way to using flagd in your Kubernetes cluster.
|
||||
It allows you to define feature flags as custom resources, inject flagd as a sidecar, and more.
|
||||
Please see the [installation guide](https://github.com/open-feature/open-feature-operator/blob/main/docs/installation.md) to get started.
|
||||
|
|
|
@ -0,0 +1,260 @@
|
|||
---
|
||||
description: flagd in-process provider specification
|
||||
---
|
||||
|
||||
# Creating an in-process flagd provider
|
||||
|
||||
An in-process flagd provider is designed to be embedded into the application, and therefore no communication outside the process of the application for feature flag evaluation is needed.
|
||||
This can be desirable in some cases, particularly if latency is a concern.
|
||||
|
||||
The in-process flagd provider is responsible for creating an abstraction between the [JsonLogic](https://jsonlogic.com) based evaluation of flag configurations following the [flag configuration scheme](https://github.com/open-feature/schemas/blob/main/json/flagd-definitions.json) used by `flagd` and the OpenFeature SDK (for the [chosen technology](https://openfeature.dev/docs/reference/technologies/)).
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Understanding of [general provider concepts](https://openfeature.dev/docs/reference/concepts/provider/)
|
||||
- Proficiency in the chosen programming language (check the language isn't already covered by the [existing providers](https://openfeature.dev/ecosystem?instant_search%5BrefinementList%5D%5Bvendor%5D%5B0%5D=flagd))
|
||||
|
||||
The Flag definition containing the feature flags and JsonLogic based targeting rules shall be retrieved by the
|
||||
in-process flagd provider via a gRPC client connection to a sync server, such as [flagd-proxy](https://github.com/open-feature/flagd/tree/main/flagd-proxy).
|
||||
|
||||
## Sync source
|
||||
|
||||
An implementation of an in-process flagd-provider must accept the following environment variables which determine the sync source:
|
||||
|
||||
- `FLAGD_SOURCE_URI`: The URI identifying the sync source. Depending on the sync provider type, this can be the URI of a gRPC service providing the `sync` API required by the in-process flagd provider, or the name of a [core.openfeature.dev/v1beta1.FeatureFlag](https://github.com/open-feature/open-feature-operator/blob/main/docs/crds.md#featureflag-1) Custom Resource containing the flag definition.
|
||||
- `FLAGD_SOURCE_PROVIDER_TYPE`: The type of the provider. E.g. `grpc` or `kubernetes`.
|
||||
- `FLAGD_SOURCE_SELECTOR`: Optional selector for the feature flag definition of interest. This is used as a `selector` for the flagd-proxy's sync API to identify a flag definition within a collection of feature flag definitions.
|
||||
|
||||
An implementation of an in-process flagd provider should provide a source for retrieving the flag definition, namely a gRPC source.
|
||||
Other sources may be desired eventually, so separation of concerns should be maintained between the abstractions evaluating flags and those retrieving configuration.
|
||||
|
||||
## gRPC sources
|
||||
|
||||
gRPC sync sources are identified by the `provider` field set to `grpc`.
|
||||
When such a sync source is specified, the in-process flagd provider should connect to the gRPC service located at the `uri` of the sync source, and use its [sync API](./protos.md#syncv1sync_serviceproto) to retrieve the feature flag definition.
|
||||
If the `selector` field of the sync source is set, that selector should be passed through to the `Sync` and `FetchAllFlags` requests sent to the gRPC server.
|
||||
|
||||
### Protobuf
|
||||
|
||||
Protobuf schemas define the contract between a client (flagd or the in-process provider implementation) and server (`flagd-proxy`).
|
||||
`flagd-proxy`'s schemas are defined [here](https://github.com/open-feature/schemas/tree/main/protobuf/sync/v1).
|
||||
|
||||
#### Code generation for gRPC sync
|
||||
|
||||
Leverage the [buf CLI](https://docs.buf.build/installation) or [protoc](https://grpc.io/docs/protoc-installation/) to generate a `flagd-proxy` client in the chosen technology:
|
||||
|
||||
Add the [open-feature schema repository](https://github.com/open-feature/schemas) as a submodule
|
||||
|
||||
```shell
|
||||
git submodule add --force https://github.com/open-feature/schemas.git
|
||||
```
|
||||
|
||||
Create a `buf.gen.{chosen language}.yaml` for the chosen language in `schemas/protobuf` (if it doesn't already exist) using one of the other files as a template (find a plugin for the chosen language [here](https://buf.build/protocolbuffers/plugins)) and create a pull request with this file.
|
||||
|
||||
Generate the code (this step ought to be automated in the build process for the chosen technology so that the generated code is never committed)
|
||||
|
||||
```shell
|
||||
cd schemas/protobuf
|
||||
buf generate --template buf.gen.{chosen language}.yaml
|
||||
```
|
||||
|
||||
As an alternative to buf, use the .proto file directly along with whatever protoc-related tools or plugins are available for your language.
|
||||
|
||||
Move the generated code (following convention for the chosen language) and add its location to .gitignore
|
||||
|
||||
Note that for the in-process provider only the `sync` package will be relevant, as it does not communicate with `flagd`, but only with compliant gRPC services such as `flagd-proxy`.
|
||||
|
||||
## JsonLogic evaluation
|
||||
|
||||
An in-process flagd provider should provide the feature set offered by [JsonLogic](https://jsonlogic.com) to evaluate flag resolution requests for a given context.
|
||||
If available, the JsonLogic library for the chosen technology should be used.
|
||||
Additionally, it should also provide the custom JsonLogic evaluators and `$flagd` properties in the evaluation context described below.
|
||||
|
||||
### Custom JsonLogic evaluators
|
||||
|
||||
In addition to the built-in evaluators provided by JsonLogic, the following custom targeting rules should be implemented by the provider:
|
||||
|
||||
- [Fractional operation](../../reference/custom-operations/fractional-operation.md):
|
||||
This evaluator allows splitting of the returned variants of a feature flag into different buckets, where each bucket
|
||||
can be assigned a percentage, representing how many requests will resolve to the corresponding variant.
|
||||
The sum of all weights must be 100, and the distribution must be performed by using the value of a referenced property
|
||||
from the evaluation context to hash that value and map it to a value between [0, 100].
|
||||
It is important to note that evaluations MUST be sticky, meaning that flag resolution requests containing the
|
||||
same value for the referenced property in their context MUST always resolve to the same variant.
|
||||
For calculating the hash value of the referenced evaluation context property,
|
||||
the [MurmurHash3](https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp) hash function should be used.
|
||||
This is to ensure that flag resolution requests yield the same result, regardless of which implementation of
|
||||
the in-process flagd provider is being used. For more specific implementation guidelines, please refer to
|
||||
[this document](./custom-operations/fractional-operation-spec.md).
|
||||
- [Semantic version evaluation](../../reference/custom-operations/semver-operation.md):
|
||||
This evaluator checks if the given property within the evaluation context matches a semantic versioning condition.
|
||||
It returns 'true', if the value of the given property meets the condition, 'false' if not.
|
||||
For more specific implementation guidelines, please refer to [this document](../specifications/custom-operations/semver-operation-spec.md).
|
||||
- [StartsWith/EndsWith evaluation](../../reference/custom-operations/string-comparison-operation.md):
|
||||
This evaluator selects a variant based on whether the specified property within the evaluation context
|
||||
starts/ends with a certain string.
|
||||
For more specific implementation guidelines, please refer to [this document](./custom-operations/string-comparison-operation-spec.md).
|
||||
|
||||
### Targeting key
|
||||
|
||||
An in-process provider should map the [targeting-key](https://openfeature.dev/specification/glossary#targeting-key) into a top level property of the context used in rules, with the key `"targetingKey"`.
|
||||
|
||||
### $flagd properties in the evaluation context
|
||||
|
||||
An in-process flagd provider should also add the following properties to the JsonLogic evaluation context so that users can use them in their targeting rules.
|
||||
Conflicting properties in the context will be overwritten by the values below.
|
||||
|
||||
| Property | Description |
|
||||
|----------|-------------|
|
||||
| `$flagd.flagKey` | the identifier for the flag being evaluated |
|
||||
| `$flagd.timestamp`| a unix timestamp (in seconds) of the time of evaluation |
|
||||
|
||||
## Provider construction
|
||||
|
||||
(**using Go as an example**)
|
||||
|
||||
Create a provider struct/class/type (whichever is relevant to the chosen language) with an exported (public) constructor allowing configuration (e.g. `flagd` host).
|
||||
Give the provider an un-exported (private) client field, set this field as the client generated by the previous step.
|
||||
|
||||
Create methods for the provider to satisfy the chosen language SDK's provider interface.
|
||||
These methods ought to wrap the built client's methods.
|
||||
|
||||
```go
|
||||
type Provider struct {
|
||||
evaluator IJsonEvaluator
|
||||
}
|
||||
|
||||
type ProviderOption func(*Provider)
|
||||
|
||||
func NewProvider(options ...ProviderOption) *Provider {
|
||||
provider := &Provider{}
|
||||
for _, opt := range opts {
|
||||
opt(provider)
|
||||
}
|
||||
|
||||
// create a store that is responsible for retrieving the flag configurations
|
||||
// from the sources that are given to the provider via the options
|
||||
s := store.NewFlags()
|
||||
s.FlagSources = append(s.FlagSources, os.Getenv("FLAGD_SOURCE_URI"))
|
||||
s.SourceMetadata[provider.URI] = store.SourceDetails{
|
||||
Source: os.Getenv("FLAGD_SOURCE_URI"),
|
||||
Selector: os.Getenv("FLAGD_SOURCE_SELECTOR")),
|
||||
}
|
||||
|
||||
// derive evaluator
|
||||
provider.evaluator := setupJSONEvaluator(logger, s)
|
||||
|
||||
return provider
|
||||
}
|
||||
|
||||
func WithHost(host string) ProviderOption {
|
||||
return func(p *Provider) {
|
||||
p.flagdHost = host
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) BooleanEvaluation(
|
||||
ctx context.Context, flagKey string, defaultValue bool, evalCtx of.FlattenedContext,
|
||||
) of.BoolResolutionDetail {
|
||||
|
||||
res, err := p.evaluator.ResolveBoolean(ctx, flagKey, context)
|
||||
|
||||
if err != nil {
|
||||
return of.BoolResolutionDetail{
|
||||
Value: defaultValue,
|
||||
ProviderResolutionDetail: of.ProviderResolutionDetail{
|
||||
ResolutionError: of.NewGeneralResolutionError(err.Error()),
|
||||
Reason: of.Reason(res.Reason),
|
||||
Variant: res.Variant,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return of.BoolResolutionDetail{
|
||||
Value: defaultValue,
|
||||
ProviderResolutionDetail: of.ProviderResolutionDetail{
|
||||
Reason: of.Reason(res.Reason),
|
||||
Variant: res.Variant,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ...
|
||||
```
|
||||
|
||||
## Provider lifecycle, initialization and shutdown
|
||||
|
||||
With the release of the v0.6.0 spec, OpenFeature now outlines a lifecycle for in-process flagd provider initialization and shutdown.
|
||||
|
||||
In-process flagd providers should do the following to make use of OpenFeature v0.6.0 features:
|
||||
|
||||
- start in a `NOT_READY` state
|
||||
- fetch the flag definition specified in the sync provider sources and set `state` to `READY` or `ERROR` in the `initialization` function
|
||||
- note that the SDK will automatically emit `PROVIDER_READY`/`PROVIDER_ERROR` according to the termination of the `initialization` function
|
||||
- throw an exception or terminate abnormally if a connection cannot be established during `initialization`
|
||||
- For gRPC based sources (i.e. flagd-proxy), attempt to restore the streaming connection to flagd-proxy (if the connection cannot be established or is broken):
|
||||
- If a flag definition has been retrieved previously, go into `STALE` state to indicate that flag resolution responses are based on a potentially outdated flag definition.
|
||||
- reconnection should be attempted with an exponential back-off, with a max-delay of `maxSyncRetryInterval` (see [configuration](#configuration))
|
||||
- reconnection should be attempted up to `maxSyncRetries` times (see [configuration](#configuration))
|
||||
- `PROVIDER_READY` and `PROVIDER_CONFIGURATION_CHANGED` should be emitted, in that order, after successful reconnection
|
||||
- For Kubernetes sync sources, retry to retrieve the FlagConfiguration resource, using an exponential back-off strategy, with a max-delay of `maxSyncRetryInterval` (see [configuration](#configuration))
|
||||
- emit `PROVIDER_CONFIGURATION_CHANGED` event and update the ruleset when a `configuration_change` message is received on the streaming connection
|
||||
- close the streaming connection in the `shutdown` function
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> NOT_READY
|
||||
NOT_READY --> READY: initialize(), stream connected, flag configurations retrieved
|
||||
NOT_READY --> ERROR: initialize(), unable to connect (retry)
|
||||
READY --> STALE: previously retrieved flag configurations can not be retrieved anymore (emit stale*)
|
||||
STALE --> READY: connection to flag source reestablished, and latest flag configurations retrieved (emit ready*, changed*)
|
||||
STALE --> ERROR: connection reattempt failed after maxSyncRetries reached (emit error*)
|
||||
READY --> READY: configuration_change (emit changed*)
|
||||
ERROR --> READY: reconnect successful (emit ready*, changed*)
|
||||
ERROR --> ERROR: maxSyncRetries reached
|
||||
ERROR --> [*]: shutdown(), stream disconnected
|
||||
```
|
||||
|
||||
\* ready=`PROVIDER_READY`, changed=`PROVIDER_CONFIGURATION_CHANGED`, stale=`PROVIDER_STALE`, error=`PROVIDER_ERROR`
|
||||
|
||||
## Configuration
|
||||
|
||||
Expose means to configure the provider aligned with the following priority system (highest to lowest).
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
constructor-parameters -->|highest priority| environment-variables -->|lowest priority| defaults
|
||||
```
|
||||
|
||||
### Explicit declaration
|
||||
|
||||
This takes the form of parameters to the provider's constructor, it has the highest priority.
|
||||
|
||||
### Environment variables
|
||||
|
||||
Read environment variables with sensible defaults (before applying the values explicitly declared to the constructor).
|
||||
|
||||
| Option name | Environment variable name | Type | Options | Default |
|
||||
| --------------------------- | ------------------------------------- | ------- | ------------ | -------------------------------------- |
|
||||
| host | FLAGD_PROXY_HOST | string | | localhost |
|
||||
| port | FLAGD_PROXY_PORT | number | | 8013 |
|
||||
| tls | FLAGD_PROXY_TLS | boolean | | false |
|
||||
| socketPath | FLAGD_PROXY_SOCKET_PATH | string | | |
|
||||
| certPath | FLAGD_PROXY_SERVER_CERT_PATH | string | | |
|
||||
| sourceURI | FLAGD_SOURCE_URI | string | | |
|
||||
| sourceProviderType | FLAGD_SOURCE_PROVIDER_TYPE | string | | grpc |
|
||||
| sourceSelector | FLAGD_SOURCE_SELECTOR | string | | |
|
||||
| maxSyncRetries | FLAGD_MAX_SYNC_RETRIES | int | | 0 (0 means unlimited) |
|
||||
| maxSyncRetryInterval | FLAGD_MAX_SYNC_RETRY_INTERVAL | int | | 60s |
|
||||
|
||||
## Error handling
|
||||
|
||||
Handle flag evaluation errors by using the error constructors exported by the SDK (e.g. `openfeature.NewProviderNotReadyResolutionError(ConnectionError)`), thereby allowing the SDK to parse and handle the error appropriately.
|
||||
|
||||
## Post creation
|
||||
|
||||
The following steps will extend the reach of the newly created provider to other developers of the chosen technology.
|
||||
|
||||
### Open an issue to document the provider
|
||||
|
||||
Create an issue [here](https://github.com/open-feature/openfeature.dev/issues/new?assignees=&labels=provider&template=document-provider.yaml&title=%5BProvider%5D%3A+) for adding the provider to [openfeature.dev](https://openfeature.dev).
|
|
@ -1,122 +0,0 @@
|
|||
# gRPC Custom Name Resolver Proposal
|
||||
|
||||
## Details
|
||||
|
||||
| | |
|
||||
|------------------------|------------------------------------|
|
||||
| **Feature Name** | gRPC custom name resolver |
|
||||
| **Type** | enhancement |
|
||||
| **Related components** | gRPC source resolution |
|
||||
|
||||
## Summary
|
||||
|
||||
gRPC by default supports DNS resolution which is currently being used e.g. "localhost:8013" in both
|
||||
[core](https://github.com/open-feature/flagd/blob/main/core/pkg/sync/grpc/grpc_sync.go#L72-L74) and
|
||||
providers e.g. [java](https://github.com/open-feature/java-sdk-contrib/blob/main/providers/flagd/src/main/java/dev/openfeature/contrib/providers/flagd/resolver/common/ChannelBuilder.java#L53-L55).
|
||||
This covers most deployments, but with increased adoption of microservice-architecture, service discovery,
|
||||
policy-enabled service meshes (e.g. istio, envoy, consul, etc) it's necessary to support custom routing and name resolution.
|
||||
|
||||
For such cases the gRPC core libraries support a few alternative resolvers* and also expose the required interfaces to build custom implementations:
|
||||
|
||||
### Reference
|
||||
|
||||
* [Custom Name Resolution](https://grpc.io/docs/guides/custom-name-resolution/)
|
||||
* [Java Client](https://grpc.github.io/grpc-java/javadoc/io/grpc/ManagedChannelBuilder.html#forTarget(java.lang.String))
|
||||
* [Golang](https://pkg.go.dev/google.golang.org/grpc#NewClient)
|
||||
|
||||
**Note:** There is a small variation in the supported alternative resolvers, e.g. Java supports `zooKeeper`
|
||||
|
||||
## Motivation
|
||||
|
||||
The main motivation is to support complex deployments with a generic custom name resolver using the interface
|
||||
provided by gRPC core*.
|
||||
|
||||
**Note**: As of now, only `java` and `golang` have the required interface to create a custom resolver
|
||||
|
||||
## Detailed design
|
||||
|
||||
The idea is to
|
||||
|
||||
* allow a new config option to pass the [target](https://grpc.io/docs/guides/custom-name-resolution/#life-of-a-target-string) string
|
||||
* reduce need to create/override existing implementations to simplify use of name-resolver
|
||||
|
||||
### Target String Pattern*
|
||||
|
||||
Below is an example of a custom target string which will use envoy sidecar proxy for name resolution
|
||||
|
||||
```text
|
||||
envoy://localhost:9211/flagd-sync.service
|
||||
```
|
||||
|
||||
The custom name resolver provider in this case will use the endpoint name i.e. `flagd-sync.service` as [authority](https://github.com/grpc/grpc-java/blob/master/examples/src/main/java/io/grpc/examples/nameresolve/ExampleNameResolver.java#L55-L61)
|
||||
and connect to `localhost:9211`
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant application
|
||||
participant flagd-provider
|
||||
participant proxy-sidecar-agent
|
||||
participant flagd-sync.service
|
||||
|
||||
application->>flagd-provider: Check the state of a feature flag
|
||||
flagd-provider-->>application: Get the feature flag from in-memory cache <br/> run the evaluation logic and return final state
|
||||
loop
|
||||
flagd-provider->>flagd-provider: in-memory cache
|
||||
end
|
||||
flagd-provider->>proxy-sidecar-agent: gRPC stream connection
|
||||
proxy-sidecar-agent-->>flagd-provider:
|
||||
Note right of flagd-provider: Instead of a host:port target string, <br> "envoy://localhost:9211/flagd-sync.service" <br> will be used
|
||||
proxy-sidecar-agent->>flagd-sync.service: Apply required policy and route traffic <br> to backend nodes
|
||||
flagd-sync.service-->>proxy-sidecar-agent:
|
||||
Note right of proxy-sidecar-agent: Policy and route rules are applied based <br> on `authority` header used by the <br> gRPC client
|
||||
```
|
||||
|
||||
#### Drawbacks
|
||||
|
||||
* One of the big drawbacks is the limited language support — only `java` and `golang`
|
||||
* Will introduce inconsistent user experience
|
||||
* Will open the door for different use cases although this can be fixed by
|
||||
providing sdks similar to [custom connector](https://github.com/open-feature/java-sdk-contrib/tree/main/providers/flagd#custom-connector)
|
||||
* ...
|
||||
|
||||
## Alternatives
|
||||
|
||||
### Option-1
|
||||
|
||||
Allow users to override the default `authority` header as shown above in `grpcurl`; the override option is
|
||||
already supported by all major languages*
|
||||
|
||||
* [Golang](https://pkg.go.dev/google.golang.org/grpc#WithAuthority)
|
||||
* [JAVA](https://grpc.github.io/grpc-java/javadoc/io/grpc/ForwardingChannelBuilder2.html#overrideAuthority(java.lang.String))
|
||||
* [Python](https://grpc.github.io/grpc/python/glossary.html#term-channel_arguments)
|
||||
|
||||
This option is simple and easy to implement; although it will not cover all the cases, it will at least help with proxy
|
||||
setup where `host_header` was used to route traffic.
|
||||
|
||||
**Ref**:
|
||||
|
||||
Java PR: <https://github.com/open-feature/java-sdk-contrib/pull/949>
|
||||
|
||||
**Note**: JS, .NET, and PHP still need to be explored to determine whether this option is available
|
||||
|
||||
### Option-2
|
||||
|
||||
Only support the [xDS](https://grpc.io/docs/guides/custom-load-balancing/#service-mesh) protocol, which is already supported by gRPC core and doesn't require any custom
|
||||
name resolver; we can simply use any `target` string with the `xds://` scheme. The big benefit of this approach is that
|
||||
it is going to be the new standard when it comes to gRPC with service meshes, and it eliminates any custom implementation in `flagd`;
|
||||
additionally, the gRPC core team is actively adding more features, e.g. mTLS.
|
||||
|
||||
For more details, refer to the documents below:
|
||||
|
||||
* [gRPC xDS Feature](https://grpc.github.io/grpc/core/md_doc_grpc_xds_features.html)
|
||||
* [gRPC xDS RFC](https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md)
|
||||
|
||||
### Option-3
|
||||
|
||||
TBD
|
||||
|
||||
## Unresolved questions
|
||||
|
||||
* What to do with un-supported languages
|
||||
* Coming up with generic name resolver which will cover most of the cases not just proxy
|
||||
* ....
|
|
@ -25,7 +25,6 @@ A variant type flag response.
|
|||
| string_value | [string](#string) | | |
|
||||
| double_value | [double](#double) | | |
|
||||
| object_value | [google.protobuf.Struct](#google-protobuf-Struct) | | |
|
||||
| metadata | [google.protobuf.Struct](#google-protobuf-Struct) | | Metadata for this evaluation |
|
||||
|
||||
|
||||
|
||||
|
@ -82,7 +81,6 @@ Response body for bulk flag evaluation, used by the ResolveAll rpc.
|
|||
| Field | Type | Label | Description |
|
||||
| ----- | ---- | ----- | ----------- |
|
||||
| flags | [ResolveAllResponse.FlagsEntry](#flagd-evaluation-v1-ResolveAllResponse-FlagsEntry) | repeated | Object structure describing the evaluated flags for the provided context. |
|
||||
| metadata | [google.protobuf.Struct](#google-protobuf-Struct) | | Metadata for the bulk evaluation |
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1,305 +0,0 @@
|
|||
---
|
||||
description: flagd provider specification
|
||||
---
|
||||
|
||||
# flagd Providers
|
||||
|
||||
!!! note
|
||||
|
||||
This document serves as both a specification and general documentation for flagd providers.
|
||||
For language-specific details, see the `README.md` for the provider in question.
|
||||
|
||||
flagd providers are as essential as the flagd daemon itself, acting as the "bridge" between the OpenFeature SDK and flagd.
|
||||
In fact, flagd providers may be the most crucial part of the flagd framework, as they can be used without an active flagd instance.
|
||||
This document outlines their behavior and configuration.
|
||||
|
||||
## Naming
|
||||
|
||||
Consistent with our [naming conventions](../naming.md), the flagd provider name (no matter what language or runtime is in use) is `flagd`.
|
||||
|
||||
## Overview of Resolver Types
|
||||
|
||||
There are three resolver types for flagd providers:
|
||||
|
||||
- RPC Resolver: Evaluates flags remotely by connecting to a flagd instance using the gRPC evaluation protocol.
|
||||
- In-Process Resolver: Downloads the flag set rules from a flagd instance and evaluates them locally, offering low-latency performance.
|
||||
- File Resolver: Similar to in-process in terms of performance and latency; operates offline by reading flag definitions from a file, ideal for environments without network connectivity.
|
||||
|
||||
Each resolver type has unique characteristics and use cases. Below, we delve into their specifics.
|
||||
|
||||
## flagd Provider Lifecycle
|
||||
|
||||
flagd providers are built to adhere to the [provider lifecycle](https://openfeature.dev/specification/sections/flag-evaluation/#17-provider-lifecycle-management) defined in the OpenFeature specification.
|
||||
Understanding the flagd provider lifecycle is helpful in configuring and optimizing your flagd deployment, and critical to implementing a flagd provider.
|
||||
|
||||
The lifecycle is summarized below:
|
||||
|
||||
- on initialization, attempt to connect the appropriate stream according to the resolver type ([sync](https://buf.build/open-feature/flagd/docs/main:flagd.sync.v1#flagd.sync.v1.FlagSyncService.SyncFlags) stream for in-process vs [event](https://buf.build/open-feature/flagd/docs/main:flagd.evaluation.v1#flagd.evaluation.v1.Service.EventStream) stream for RPC) and in the case of in-process, fetch the [sync-metadata](https://buf.build/open-feature/flagd/docs/main:flagd.sync.v1#flagd.sync.v1.FlagSyncService.GetMetadata)
|
||||
- if stream connection succeeds within the time specified by `deadline`, return from initialization (SDK will emit `PROVIDER_READY`) and for in-process providers, store the `flag set` rules
|
||||
- if stream connection fails or exceeds the time specified by `deadline`, abort initialization (SDK will emit `PROVIDER_ERROR`), and attempt to [reconnect](#stream-reconnection)
|
||||
- while connected:
|
||||
- flags are resolved according to resolver mode; either by calling evaluation RPCs, or by evaluating the stored `flag set` rules
|
||||
- for RPC providers, flags resolved with `reason=STATIC` are [cached](#flag-evaluation-caching)
|
||||
- if the associated stream (event or sync) indicates flags have changed, flush the cache or update the `flag set` rules respectively, and emit `PROVIDER_CONFIGURATION_CHANGED`
|
||||
- if stream disconnects:
|
||||
- [reconnect](#stream-reconnection) with exponential backoff offered by GRPC.
|
||||
- if disconnected time <= `retryGracePeriod`
|
||||
- emit `PROVIDER_STALE`
|
||||
- RPC mode resolves `STALE` from cache where possible
|
||||
- in-process mode resolves `STALE` from stored `flag set` rules
|
||||
- if disconnected time > `retryGracePeriod`
|
||||
- emit `PROVIDER_ERROR`
|
||||
- RPC mode evaluation cache is purged
|
||||
- in-process mode resolves `STALE` from stored `flag set` rules
|
||||
- on stream reconnection:
|
||||
- emit `PROVIDER_READY` and `PROVIDER_CONFIGURATION_CHANGED`
|
||||
- in-process providers store the latest `flag set` rules
|
||||
- emit `PROVIDER_CONFIGURATION_CHANGED` event and update `flag set` rules when a `configuration_change` message is received on the streaming connection
|
||||
- on shutdown, close the streaming connection in the `shutdown` function
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> NOT_READY
|
||||
NOT_READY --> READY: initialize
|
||||
NOT_READY --> ERROR: initialize
|
||||
READY --> ERROR: disconnected, disconnected period == 0
|
||||
READY --> STALE: disconnected, disconnect period < retry grace period
|
||||
STALE --> ERROR: disconnect period >= retry grace period
|
||||
ERROR --> READY: reconnected
|
||||
ERROR --> [*]: shutdown
|
||||
|
||||
note right of STALE
|
||||
stream disconnected, attempting to reconnect,
|
||||
resolve from cache*
|
||||
resolve from flag set rules**
|
||||
STALE emitted
|
||||
end note
|
||||
|
||||
note right of READY
|
||||
stream connected,
|
||||
evaluation cache active*,
|
||||
flag set rules stored**,
|
||||
metadata fetched**
|
||||
READY emitted
|
||||
CHANGE emitted with stream messages
|
||||
end note
|
||||
|
||||
note right of ERROR
|
||||
stream disconnected, attempting to reconnect,
|
||||
evaluation cache purged*,
|
||||
ERROR emitted
|
||||
end note
|
||||
|
||||
%% * RPC providers only
|
||||
%% ** In-Process providers only
|
||||
```
|
||||
|
||||
```pseudo
|
||||
* RPC providers only
|
||||
** In-Process providers only
|
||||
```
|
||||
|
||||
### Stream Reconnection
|
||||
|
||||
When either stream (sync or event) disconnects, whether due to the associated deadline being exceeded, network error or any other cause, the provider attempts to re-establish the stream immediately, and then retries with an exponential back-off.
|
||||
We always rely on the [integrated functionality of GRPC for reconnection](https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md) and utilize [Wait-for-Ready](https://grpc.io/docs/guides/wait-for-ready/) to re-establish the stream.
|
||||
We are configuring the underlying reconnection mechanism whenever we can, based on our configuration. (not all GRPC implementations support this)
|
||||
|
||||
| language/property | min connect timeout | max backoff | initial backoff | jitter | multiplier |
|
||||
|-------------------|-----------------------------------|--------------------------|--------------------------|--------|------------|
|
||||
| GRPC property | grpc.initial_reconnect_backoff_ms | max_reconnect_backoff_ms | min_reconnect_backoff_ms | 0.2 | 1.6 |
|
||||
| Flagd property | deadlineMs | retryBackoffMaxMs | retryBackoffMs | 0.2 | 1.6 |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| default [^1] | ✅ | ✅ | ✅ | 0.2 | 1.6 |
|
||||
| js | ✅ | ✅ | ❌ | 0.2 | 1.6 |
|
||||
| java | ❌ | ❌ | ❌ | 0.2 | 1.6 |
|
||||
|
||||
[^1] : C++, Python, Ruby, Objective-C, PHP, C#, js(deprecated)
|
||||
|
||||
When disconnected, if the time since disconnection is less than `retryGracePeriod`, the provider emits `STALE` when it disconnects.
|
||||
While the provider is in state `STALE` the provider resolves values from its cache or stored flag set rules, depending on its resolver mode.
|
||||
When the time since the last disconnect first exceeds `retryGracePeriod`, the provider emits `ERROR`.
|
||||
The provider attempts to reconnect indefinitely, with a maximum interval of `retryBackoffMaxMs`.
|
||||
|
||||
## RPC Resolver
|
||||
|
||||
RPC providers use the [evaluation protocol](./protos.md#flagdevaluationv1evaluationproto) to connect to flagd, initiate the [event stream](./protos.md#eventstreamresponse), listen for changes in the flag definitions, and evaluate flags remotely by calling flagd.
|
||||
RPC providers are relatively simple to implement since they essentially call a remote flagd instance with relevant parameters, and then flagd responds with the resolved flag value.
|
||||
Of course, this means there's latency associated with RPC providers, though this is mitigated somewhat by [caching](#flag-evaluation-caching).
|
||||
|
||||
### Flag Evaluation Caching
|
||||
|
||||
In RPC mode, `flagd` uses a caching mechanism which greatly reduces latency for static flags (flags without targeting rules).
|
||||
Evaluations for flags with targeting rules are never cached.
|
||||
|
||||
!!! note
|
||||
|
||||
    Evaluation caching is only relevant when the RPC resolver is used; the in-process resolver stores a complete set of rules for a `flag set`, which means evaluation can be done locally, with low latency.
|
||||
|
||||
#### Cacheable flags
|
||||
|
||||
`flagd` sets the `reason` of a flag evaluation as `STATIC` when no targeting rules are configured for the flag.
|
||||
A client can safely store the result of a static evaluation in its cache indefinitely (until the configuration of the flag changes, see [cache invalidation](#cache-invalidation)).
|
||||
|
||||
Put simply in pseudocode:
|
||||
|
||||
```pseudo
|
||||
if reason == "STATIC" {
|
||||
isFlagCacheable = true
|
||||
}
|
||||
```
|
||||
|
||||
#### Cache invalidation
|
||||
|
||||
`flagd` emits events to the server-to-client stream, among these is the `configuration_change` event.
|
||||
The structure of this event is as such:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "delete", // ENUM:["delete","write","update"]
|
||||
"source": "/flag-configuration.json", // the source of the flag configuration
|
||||
"flagKey": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
A client should invalidate the cache of any flag found in a `configuration_change` event to prevent stale data.
|
||||
If the connection drops all cache values must be cleared (any number of events may have been missed).
|
||||
|
||||
### Client Side Providers
|
||||
|
||||
Client side flagd providers (used in mobile and front-end web applications) have unique security and performance considerations.
|
||||
These flagd providers only support the RPC resolver mode (so that `flag set` rules, which might contain sensitive information, are never sent to the client).
|
||||
Instead, these do bulk evaluations of all flags in the `flag set`, and cache the results until they are invalidated.
|
||||
Bulk evaluations take place when:
|
||||
|
||||
- the provider is initialized
|
||||
- the context is changed
|
||||
- a change in the definition notifies the provider it should re-evaluate the flags
|
||||
|
||||
This pattern is consistent with OpenFeature's [static context paradigm](https://openfeature.dev/specification/glossary#static-context-paradigm).
|
||||
|
||||
!!! note
|
||||
|
||||
To support easy integration with mobile and browser use cases, flagd's [evaluation protocol](./protos.md#flagdevaluationv1evaluationproto) is accessible over both gRPC and HTTP
|
||||
|
||||
!!! note
|
||||
|
||||
flagd supports the OFREP protocol, meaning client-side OFREP providers can also be used for client-side use-cases.
|
||||
|
||||
<!-- markdownlint-disable MD024 -->
|
||||
### Provider Metadata
|
||||
<!-- markdownlint-enable MD024 -->
|
||||
|
||||
The provider metadata includes properties returned from the [provider_ready event payload](./protos.md#eventstreamresponse) data.
|
||||
|
||||
## In-Process Resolver
|
||||
|
||||
In-process providers use the [sync schema](./protos.md#syncflagsresponse) to connect to flagd, initiate the [sync stream](./protos.md#eventstreamresponse), and download the `flag set` rules to evaluate them locally.
|
||||
In-process providers are relatively complex (compared to RPC providers) to implement since they essentially must implement more of flagd's logic to evaluate flags locally.
|
||||
Local evaluation has the impact of much lower latency and almost no serialization compared to RPC providers.
|
||||
|
||||
### JsonLogic Evaluation
|
||||
|
||||
In-process flagd providers provide the feature set offered by [JsonLogic](https://jsonlogic.com) to evaluate flag resolution requests for a given context.
|
||||
|
||||
### Custom JsonLogic Evaluators
|
||||
|
||||
In addition to the built-in evaluators provided by JsonLogic, the following custom targeting rules are implemented by the provider:
|
||||
|
||||
- [Fractional operation](../../reference/custom-operations/fractional-operation.md)
|
||||
- [Semantic version evaluation](../../reference/custom-operations/semver-operation.md)
|
||||
- [StartsWith/EndsWith evaluation](../../reference/custom-operations/string-comparison-operation.md)
|
||||
|
||||
### Targeting Key
|
||||
|
||||
Similar to the flagd daemon, in-process providers map the [targeting-key](https://openfeature.dev/specification/glossary#targeting-key) into a top level property of the context used in rules, with the key `"targetingKey"`.
|
||||
|
||||
### `$flagd` Properties in the Evaluation Context
|
||||
|
||||
Similar to the flagd daemon, in-process flagd providers add the following properties to the JsonLogic evaluation context so that users can use them in their targeting rules.
|
||||
Conflicting properties in the context will be overwritten by the values below.
|
||||
|
||||
| Property | Description |
|
||||
| ------------------ | ------------------------------------------------------- |
|
||||
| `$flagd.flagKey` | the identifier for the flag being evaluated |
|
||||
| `$flagd.timestamp` | a unix timestamp (in seconds) of the time of evaluation |
|
||||
|
||||
### Changed Flags
|
||||
|
||||
When a new flag definition is parsed, the stored flags are compared with the newly parsed flags.
|
||||
Flags which have been removed, added, or mutated (considering, at a minimum, their `default variant`, `targeting rules`, and `metadata`) have their keys added to the `flags changed` field of the associated `PROVIDER_CONFIGURATION_CHANGED` event.
|
||||
|
||||
### Sync-Metadata Properties in the Evaluation Context
|
||||
|
||||
In-process flagd providers also inject any properties returned by the [sync-metadata RPC response](./protos.md#getmetadataresponse) into the context.
|
||||
This allows for static properties defined in flagd to be added to in-process evaluations.
|
||||
If only a subset of the sync-metadata response is desired to be injected into the evaluation context, you can define a mapping function with the `contextEnricher` option.
|
||||
|
||||
<!-- markdownlint-disable MD024 -->
|
||||
### Provider Metadata
|
||||
<!-- markdownlint-enable MD024 -->
|
||||
|
||||
The provider metadata includes the top-level metadata properties in the [flag definition](../flag-definitions.md).
|
||||
|
||||
## File Resolver (Offline Mode)
|
||||
|
||||
The in-process resolver mode can also use a file based [flag definition](../flag-definitions.md).
|
||||
This does not connect to a flagd instance or gRPC sync implementation, and instead polls a flag definition from a file.
|
||||
If the file has been modified since the last poll (based on the file metadata) and [flags have changed](#changed-flags), a `PROVIDER_CONFIGURATION_CHANGED` event with the appropriate `changed flags` field is emitted.
|
||||
|
||||
The Evaluation uses [JsonLogic](#jsonlogic-evaluation) and [custom JsonLogic evaluators](#custom-jsonlogic-evaluators) like the [InProcess Resolver](#in-process-resolver).
|
||||
|
||||
!!! note
|
||||
|
||||
This mode does not support [context enrichment via sync-metadata](#sync-metadata-properties-in-the-evaluation-context).
|
||||
|
||||
## Configuration
|
||||
|
||||
### Configuration options
|
||||
|
||||
Most options can be defined in the constructor, or as environment variables, with constructor options having the highest
|
||||
precedence.
|
||||
|
||||
Below are the supported configuration parameters (note that not all apply to both resolver modes):
|
||||
|
||||
| Option name | Environment variable name | Explanation | Type & Values | Default | Compatible resolver |
|
||||
| --------------------- | ------------------------------ | ---------------------------------------------------------------------- | ---------------------------- | ----------------------------- | ----------------------- |
|
||||
| resolver | FLAGD_RESOLVER | mode of operation | String - `rpc`, `in-process` | rpc | rpc & in-process |
|
||||
| host | FLAGD_HOST | remote host | String | localhost | rpc & in-process |
|
||||
| port | FLAGD_PORT | remote port | int | 8013 (rpc), 8015 (in-process) | rpc & in-process |
|
||||
| targetUri | FLAGD_TARGET_URI | alternative to host/port, supporting custom name resolution | string | null | rpc & in-process |
|
||||
| tls | FLAGD_TLS | connection encryption | boolean | false | rpc & in-process |
|
||||
| socketPath | FLAGD_SOCKET_PATH | alternative to host port, unix socket | String | null | rpc & in-process |
|
||||
| certPath | FLAGD_SERVER_CERT_PATH | tls cert path | String | null | rpc & in-process |
|
||||
| deadlineMs | FLAGD_DEADLINE_MS | deadline for unary calls, and timeout for initialization | int | 500 | rpc & in-process & file |
|
||||
| streamDeadlineMs | FLAGD_STREAM_DEADLINE_MS | deadline for streaming calls, useful as an application-layer keepalive | int | 600000 | rpc & in-process |
|
||||
| retryBackoffMs | FLAGD_RETRY_BACKOFF_MS | initial backoff for stream retry | int | 1000 | rpc & in-process |
|
||||
| retryBackoffMaxMs | FLAGD_RETRY_BACKOFF_MAX_MS | maximum backoff for stream retry | int | 120000 | rpc & in-process |
|
||||
| retryGracePeriod | FLAGD_RETRY_GRACE_PERIOD | period in seconds before provider moves from STALE to ERROR state | int | 5 | rpc & in-process & file |
|
||||
| keepAliveTime | FLAGD_KEEP_ALIVE_TIME_MS | http 2 keepalive | long | 0 | rpc & in-process |
|
||||
| cache | FLAGD_CACHE | enable cache of static flags | String - `lru`, `disabled` | lru | rpc |
|
||||
| maxCacheSize | FLAGD_MAX_CACHE_SIZE | max size of static flag cache | int | 1000 | rpc |
|
||||
| selector | FLAGD_SOURCE_SELECTOR | selects a single sync source to retrieve flags from only that source | string | null | in-process |
|
||||
| providerId | FLAGD_PROVIDER_ID | A unique identifier for flagd(grpc client) initiating the request. | string | null | in-process |
|
||||
| offlineFlagSourcePath | FLAGD_OFFLINE_FLAG_SOURCE_PATH | offline, file-based flag definitions, overrides host/port/targetUri | string | null | file |
|
||||
| offlinePollIntervalMs | FLAGD_OFFLINE_POLL_MS | poll interval for reading offlineFlagSourcePath | int | 5000 | file |
|
||||
| contextEnricher | - | sync-metadata to evaluation context mapping function | function | identity function | in-process |
|
||||
|
||||
### Custom Name Resolution
|
||||
|
||||
Some implementations support [gRPC custom name resolution](https://grpc.io/docs/guides/custom-name-resolution/), and abstractions to introduce additional resolvers.
|
||||
Specifically, a custom resolver for `envoy` has been implemented in some providers, which overrides the authority header with the authority specified in the envoy URL scheme.
|
||||
Below is an example of a custom target string which will use envoy sidecar proxy for name resolution:
|
||||
|
||||
```text
|
||||
envoy://localhost:9211/flagd-sync.service
|
||||
```
|
||||
|
||||
The custom name resolver provider in this case will use the endpoint name i.e. `flagd-sync.service` as [authority](https://github.com/grpc/grpc-java/blob/master/examples/src/main/java/io/grpc/examples/nameresolve/ExampleNameResolver.java#L55-L61)
|
||||
and connect to `localhost:9211`.
|
||||
|
||||
### Metadata
|
||||
|
||||
When a flag is resolved, the returned [metadata](./flag-definitions.md#metadata) is a merged representation of the metadata defined on the flag set, and on the flag, with the flag metadata taking priority.
|
||||
Flag metadata is returned on a "best effort" basis when flags are resolved: disabled, missing or erroneous flags return the metadata of the associated flag set whenever possible.
|
||||
This is particularly important for debugging purposes and error metrics.
|
|
@ -0,0 +1,164 @@
|
|||
---
|
||||
description: flagd RPC provider specification
|
||||
---
|
||||
|
||||
# Creating an RPC flagd provider
|
||||
|
||||
By default, **flagd** is a remote service that is accessed via **grpc** by a client application to retrieve feature flags.
|
||||
Depending on the environment, flagd therefore is usually deployed as a standalone service, e.g. as a Kubernetes Deployment,
|
||||
or injected as a sidecar container into the pod running the client application,
|
||||
as it is done in the [OpenFeature Operator](https://github.com/open-feature/open-feature-operator).
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Understanding of [general provider concepts](https://openfeature.dev/docs/reference/concepts/provider/)
|
||||
- Proficiency in the chosen programming language (check the language isn't already covered by the [existing providers](../../providers/index.md))
|
||||
|
||||
## flagd Evaluation API
|
||||
|
||||
Fundamentally, RPC providers use the [evaluation schema](./protos.md#schemav1schemaproto) to connect to flagd, initiate evaluation RPCs, and listen for changes in the flag definitions.
|
||||
In order to do this, you must generate the gRPC primitives (message types and client) using the protobuf code generation mechanisms available in your language.
|
||||
If you are unable to use gRPC code generation, you can also use REST (via the [connect protocol](https://buf.build/blog/connect-a-better-grpc)) to communicate with flagd, though in this case, you will not be able to open a stream to listen for changes.
|
||||
|
||||
### Protobuf
|
||||
|
||||
Protobuf schemas define the contract between the flagd evaluation API and a client.
|
||||
|
||||
#### Code generation for gRPC sync
|
||||
|
||||
Leverage the [buf CLI](https://docs.buf.build/installation) or protoc to generate a `flagd-proxy` client in the chosen technology:
|
||||
|
||||
Add the [open-feature schema repository](https://github.com/open-feature/schemas) as a submodule
|
||||
|
||||
```shell
|
||||
git submodule add --force https://github.com/open-feature/schemas.git
|
||||
```
|
||||
|
||||
Create a `buf.gen.{chosen language}.yaml` for the chosen language in `schemas/protobuf` (if it doesn't already exist) using one of the other files as a template (find a plugin for the chosen language [here](https://buf.build/protocolbuffers/plugins)) and create a pull request with this file.
|
||||
|
||||
Generate the code (this step ought to be automated in the build process for the chosen technology so that the generated code is never committed)
|
||||
|
||||
```shell
|
||||
cd schemas/protobuf
|
||||
buf generate --template buf.gen.{chosen language}.yaml
|
||||
```
|
||||
|
||||
As an alternative to buf, use the .proto file directly along with whatever protoc-related tools or plugins are available for your language.
|
||||
|
||||
Move the generated code (following convention for the chosen language) and add its location to .gitignore
|
||||
|
||||
Note that for the in-process provider only the `schema` package will be relevant, since RPC providers communicate directly to flagd.
|
||||
|
||||
## Provider lifecycle, initialization and shutdown
|
||||
|
||||
With the release of the v0.6.0 spec, OpenFeature now outlines a lifecycle for in-process flagd provider initialization and shutdown.
|
||||
|
||||
In-process flagd providers should do the following to make use of OpenFeature v0.6.0 features:
|
||||
|
||||
- start in a `NOT_READY` state
|
||||
- fetch the flag definition specified in the sync provider sources and set `state` to `READY` or `ERROR` in the `initialization` function
|
||||
- note that the SDK will automatically emit `PROVIDER_READY`/`PROVIDER_ERROR` according to the termination of the `initialization` function
|
||||
- throw an exception or terminate abnormally if a connection cannot be established during `initialization`
|
||||
- For gRPC based sources (i.e. flagd-proxy), attempt to restore the streaming connection to flagd-proxy (if the connection cannot be established or is broken):
|
||||
- If a flag definition has been retrieved previously, go into the `STALE` state to indicate that flag resolution responses are based on a potentially outdated flag definition.
|
||||
- reconnection should be attempted with an exponential back-off, with a max-delay of `maxSyncRetryInterval` (see [configuration](#configuration))
|
||||
- reconnection should be attempted up to `maxSyncRetryDelay` times (see [configuration](#configuration))
|
||||
- `PROVIDER_READY` and `PROVIDER_CONFIGURATION_CHANGED` should be emitted, in that order, after successful reconnection
|
||||
- For Kubernetes sync sources, retry to retrieve the FlagConfiguration resource, using an exponential back-off strategy, with a max-delay of `maxSyncRetryInterval` (see [configuration](#configuration))
|
||||
- emit `PROVIDER_CONFIGURATION_CHANGED` event and update ruleset when a `configuration_change` message is received on the streaming connection
|
||||
- close the streaming connection in the `shutdown` function
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> NOT_READY
|
||||
NOT_READY --> READY: initialize(), connection to flagd established, stream connected
|
||||
NOT_READY --> ERROR: initialize(), unable to connect or establish stream(retry)
|
||||
READY --> ERROR: stream or connection disconnected
|
||||
READY --> READY: configuration_change (emit changed*, invalidate cache)
|
||||
ERROR --> READY: reconnect successful (emit ready*, changed*, invalidate cache)
|
||||
ERROR --> ERROR: maxSyncRetries reached
|
||||
ERROR --> [*]: shutdown(), stream disconnected
|
||||
```
|
||||
|
||||
\* ready=`PROVIDER_READY`, changed=`PROVIDER_CONFIGURATION_CHANGED`, stale=`PROVIDER_STALE`, error=`PROVIDER_ERROR`
|
||||
|
||||
## Caching
|
||||
|
||||
`flagd` has a caching strategy implementable by RPC providers that support server-to-client streaming.
|
||||
|
||||
### Cacheable flags
|
||||
|
||||
`flagd` sets the `reason` of a flag evaluation as `STATIC` when no targeting rules are configured for the flag.
|
||||
A client can safely store the result of a static evaluation in its cache indefinitely (until the configuration of the flag changes, see [cache invalidation](#cache-invalidation)).
|
||||
|
||||
Put simply in pseudocode:
|
||||
|
||||
```pseudo
|
||||
if reason == "STATIC" {
|
||||
isFlagCacheable = true
|
||||
}
|
||||
```
|
||||
|
||||
### Cache invalidation
|
||||
|
||||
`flagd` emits events to the server-to-client stream, among these is the `configuration_change` event.
|
||||
The structure of this event is as such:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "delete", // ENUM:["delete","write","update"]
|
||||
"source": "/flag-configuration.json", // the source of the flag configuration
|
||||
"flagKey": "foo"
|
||||
}
|
||||
```
|
||||
|
||||
A client should invalidate the cache of any flag found in a `configuration_change` event to prevent stale data.
|
||||
If the connection drops all cache values must be cleared (any number of events may have been missed).
|
||||
|
||||
## Configuration
|
||||
|
||||
Expose means to configure the provider aligned with the following priority system (highest to lowest).
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
constructor-parameters -->|highest priority| environment-variables -->|lowest priority| defaults
|
||||
```
|
||||
|
||||
### Explicit declaration
|
||||
|
||||
This takes the form of parameters to the provider's constructor, it has the highest priority.
|
||||
|
||||
### Environment variables
|
||||
|
||||
Read environment variables with sensible defaults (before applying the values explicitly declared to the constructor).
|
||||
|
||||
| Option name | Environment variable name | Type & Values | Default |
|
||||
|-----------------------|--------------------------------|------------------------|-----------|
|
||||
| host | FLAGD_HOST | String | localhost |
|
||||
| port | FLAGD_PORT | int | 8013 |
|
||||
| tls | FLAGD_TLS | boolean | false |
|
||||
| socketPath | FLAGD_SOCKET_PATH | String | null |
|
||||
| certPath | FLAGD_SERVER_CERT_PATH | String | null |
|
||||
| deadline | FLAGD_DEADLINE_MS | int | 500 |
|
||||
| cache | FLAGD_CACHE | String - lru, disabled | lru |
|
||||
| maxCacheSize | FLAGD_MAX_CACHE_SIZE | int | 1000 |
|
||||
| maxEventStreamRetries | FLAGD_MAX_EVENT_STREAM_RETRIES | int | 5 |
|
||||
| retryBackoffMs | FLAGD_RETRY_BACKOFF_MS | int | 1000 |
|
||||
|
||||
## Error handling
|
||||
|
||||
Handle flag evaluation errors by using the error constructors exported by the SDK (e.g. `openfeature.NewProviderNotReadyResolutionError(ConnectionError)`), thereby allowing the SDK to parse and handle the error appropriately.
|
||||
|
||||
## Post creation
|
||||
|
||||
The following steps will extend the reach of the newly created provider to other developers of the chosen technology.
|
||||
|
||||
### Open an issue to document the provider
|
||||
|
||||
Create an issue in openfeature.dev [here](https://github.com/open-feature/openfeature.dev/issues/new?assignees=&labels=provider&template=document-provider.yaml&title=%5BProvider%5D%3A+).
|
||||
This will ensure the provider is added to OpenFeature's website.
|
||||
|
||||
## Serialization of the evaluation context
|
||||
|
||||
An RPC provider should serialize the OpenFeature context for use in the `evaluation.proto`.
|
||||
It should map the [targeting-key](https://openfeature.dev/specification/glossary#targeting-key) into a top level property of the context, with the key `"targetingKey"`
|
|
@ -8,7 +8,7 @@ See [syncs](../concepts/syncs.md) for a conceptual overview.
|
|||
|
||||
## URI patterns
|
||||
|
||||
Any URI passed to flagd via the `--uri` (`-f`) flag must follow one of the 6 following patterns with prefixes to ensure that
|
||||
Any URI passed to flagd via the `--uri` (`-f`) flag must follow one of the 4 following patterns with prefixes to ensure that
|
||||
it is passed to the correct implementation:
|
||||
|
||||
| Implied Sync Provider | Prefix | Example |
|
||||
|
@ -17,26 +17,7 @@ it is passed to the correct implementation:
|
|||
| `file` | `file:` | `file:etc/flagd/my-flags.json` |
|
||||
| `http` | `http(s)://` | `https://my-flags.com/flags` |
|
||||
| `grpc` | `grpc(s)://` | `grpc://my-flags-server` |
|
||||
| [grpc](#custom-grpc-target-uri) | `[ envoy \| dns \| uds\| xds ]://` | `envoy://localhost:9211/test.service` |
|
||||
| `gcs` | `gs://` | `gs://my-bucket/my-flags.json` |
|
||||
| `azblob` | `azblob://` | `azblob://my-container/my-flags.json` |
|
||||
| `s3` | `s3://` | `s3://my-bucket/my-flags.json` |
|
||||
|
||||
### Data Serialization
|
||||
|
||||
The `file`, `http`, `gcs`, `azblob` and `s3` sync providers expect the data to be formatted as JSON or YAML.
|
||||
The file extension is used to determine the serialization format.
|
||||
If the file extension hasn't been defined, the [media type](https://en.wikipedia.org/wiki/Media_type) will be used instead.
|
||||
|
||||
### Custom gRPC Target URI
|
||||
|
||||
Apart from default `dns` resolution, flagd also supports different resolution methods, e.g. `xds`.
|
||||
Currently, we support all [core resolvers](https://grpc.io/docs/guides/custom-name-resolution/) and one custom resolver for `envoy` proxy resolution.
|
||||
For more details, please refer to the [RFC](https://github.com/open-feature/flagd/blob/main/docs/reference/specifications/proposal/rfc-grpc-custom-name-resolver.md) document.
|
||||
|
||||
```shell
|
||||
./bin/flagd start -x --uri envoy://localhost:9211/test.service
|
||||
```
|
||||
|
||||
## Source Configuration
|
||||
|
||||
|
@ -50,10 +31,10 @@ Alternatively, these configurations can be passed to flagd via config file, spec
|
|||
| Field | Type | Note |
|
||||
| ----------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| uri | required `string` | Flag configuration source of the sync |
|
||||
| provider | required `string` | Provider type - `file`, `fsnotify`, `fileinfo`, `kubernetes`, `http`, `grpc`, `gcs` or `azblob` |
|
||||
| provider | required `string` | Provider type - `file`, `fsnotify`, `fileinfo`, `kubernetes`, `http`, `grpc` or `gcs` |
|
||||
| authHeader | optional `string` | Used for http sync; set this to include the complete `Authorization` header value for any authentication scheme (e.g., "Bearer token_here", "Basic base64_credentials", etc.). Cannot be used with `bearerToken` |
|
||||
| bearerToken | optional `string` | (Deprecated) Used for http sync; token gets appended to `Authorization` header with [bearer schema](https://www.rfc-editor.org/rfc/rfc6750#section-2.1). Cannot be used with `authHeader` |
|
||||
| interval | optional `uint32` | Used for http, gcs and azblob syncs; requests will be made at this interval. Defaults to 5 seconds. |
|
||||
| interval | optional `uint32` | Used for http and gcs syncs; requests will be made at this interval. Defaults to 5 seconds. |
|
||||
| tls | optional `boolean` | Enable/Disable secure TLS connectivity. Currently used only by gRPC sync. Default (ex: if unset) is false, which will use an insecure connection |
|
||||
| providerID | optional `string` | Value binds to grpc connection's providerID field. gRPC server implementations may use this to identify connecting flagd instance |
|
||||
| selector | optional `string` | Value binds to grpc connection's selector field. gRPC server implementations may use this to filter flag configurations |
|
||||
|
@ -78,14 +59,12 @@ Sync providers:
|
|||
- `file` - config/samples/example_flags.json
|
||||
- `fsnotify` - config/samples/example_flags.json
|
||||
- `fileinfo` - config/samples/example_flags.json
|
||||
- `http` - <http://my-flag-source.com/flags.json>
|
||||
- `https` - <https://my-secure-flag-source.com/flags.json>
|
||||
- `http` - <http://my-flag-source.json/>
|
||||
- `https` - <https://my-secure-flag-source.json/>
|
||||
- `kubernetes` - default/my-flag-config
|
||||
- `grpc`(insecure) - grpc-source:8080
|
||||
- `grpcs`(secure) - my-flag-source:8080
|
||||
- `grpc`(envoy) - envoy://localhost:9211/test.service
|
||||
- `gcs` - gs://my-bucket/my-flags.json
|
||||
- `azblob` - azblob://my-container/my-flags.json
|
||||
|
||||
Startup command:
|
||||
|
||||
|
@ -94,16 +73,14 @@ Startup command:
|
|||
--sources='[{"uri":"config/samples/example_flags.json","provider":"file"},
|
||||
{"uri":"config/samples/example_flags.json","provider":"fsnotify"},
|
||||
{"uri":"config/samples/example_flags.json","provider":"fileinfo"},
|
||||
{"uri":"http://my-flag-source/flags.json","provider":"http","bearerToken":"bearer-dji34ld2l"},
|
||||
{"uri":"https://secure-remote/bearer-auth/flags.json","provider":"http","authHeader":"Bearer bearer-dji34ld2l"},
|
||||
{"uri":"https://secure-remote/basic-auth/flags.json","provider":"http","authHeader":"Basic dXNlcjpwYXNz"},
|
||||
{"uri":"http://my-flag-source.json","provider":"http","bearerToken":"bearer-dji34ld2l"},
|
||||
{"uri":"https://secure-remote/bearer-auth","provider":"http","authHeader":"Bearer bearer-dji34ld2l"},
|
||||
{"uri":"https://secure-remote/basic-auth","provider":"http","authHeader":"Basic dXNlcjpwYXNz"},
|
||||
{"uri":"default/my-flag-config","provider":"kubernetes"},
|
||||
{"uri":"grpc-source:8080","provider":"grpc"},
|
||||
{"uri":"my-flag-source:8080","provider":"grpc", "maxMsgSize": 5242880},
|
||||
{"uri":"envoy://localhost:9211/test.service", "provider":"grpc"},
|
||||
{"uri":"my-flag-source:8080","provider":"grpc", "certPath": "/certs/ca.cert", "tls": true, "providerID": "flagd-weatherapp-sidecar", "selector": "source=database,app=weatherapp"},
|
||||
{"uri":"gs://my-bucket/my-flag.json","provider":"gcs"},
|
||||
{"uri":"azblob://my-container/my-flag.json","provider":"azblob"}]'
|
||||
{"uri":"gs://my-bucket/my-flag.json","provider":"gcs"}]'
|
||||
```
|
||||
|
||||
Configuration file,
|
||||
|
@ -116,7 +93,7 @@ sources:
|
|||
provider: fsnotify
|
||||
- uri: config/samples/example_flags.json
|
||||
provider: fileinfo
|
||||
- uri: http://my-flag-source/flags.json
|
||||
- uri: http://my-flag-source.json
|
||||
provider: http
|
||||
bearerToken: bearer-dji34ld2l
|
||||
- uri: default/my-flag-config
|
||||
|
@ -126,8 +103,6 @@ sources:
|
|||
- uri: my-flag-source:8080
|
||||
provider: grpc
|
||||
maxMsgSize: 5242880
|
||||
- uri: envoy://localhost:9211/test.service
|
||||
provider: grpc
|
||||
- uri: my-flag-source:8080
|
||||
provider: grpc
|
||||
certPath: /certs/ca.cert
|
||||
|
@ -136,6 +111,4 @@ sources:
|
|||
selector: "source=database,app=weatherapp"
|
||||
- uri: gs://my-bucket/my-flag.json
|
||||
provider: gcs
|
||||
- uri: azblob://my-container/my-flags.json
|
||||
provider: azblob
|
||||
```
|
||||
|
|
|
@ -49,21 +49,6 @@
|
|||
"$ref": "./targeting.json"
|
||||
}
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"title": "Flag Set Metadata",
|
||||
"description": "Metadata about the flag set, with keys of type string, and values of type boolean, string, or number.",
|
||||
"properties": {
|
||||
"flagSetId": {
|
||||
"description": "The unique identifier for the flag set.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "The version of the flag set.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"$ref": "#/definitions/metadata"
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
|
@ -87,11 +72,6 @@
|
|||
},
|
||||
"targeting": {
|
||||
"$ref": "./targeting.json"
|
||||
},
|
||||
"metadata": {
|
||||
"title": "Flag Metadata",
|
||||
"description": "Metadata about an individual feature flag, with keys of type string, and values of type boolean, string, or number.",
|
||||
"$ref": "#/definitions/metadata"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
|
@ -199,18 +179,6 @@
|
|||
"$ref": "#/definitions/objectVariants"
|
||||
}
|
||||
]
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"description": "Any additional key/value pair with value of type boolean, string, or number.",
|
||||
"type": [
|
||||
"string",
|
||||
"number",
|
||||
"boolean"
|
||||
]
|
||||
},
|
||||
"required": []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -455,7 +455,8 @@
|
|||
}
|
||||
},
|
||||
"fractionalWeightArg": {
|
||||
"description": "Distribution for all possible variants, with their associated weighting.",
|
||||
"$comment": "if we remove the \"sum to 100\" restriction, update the descriptions below!",
|
||||
"description": "Distribution for all possible variants, with their associated weighting out of 100.",
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"maxItems": 2,
|
||||
|
@ -465,7 +466,7 @@
|
|||
"type": "string"
|
||||
},
|
||||
{
|
||||
"description": "Weighted distribution for this variant key.",
|
||||
"description": "Weighted distribution for this variant key (must sum to 100).",
|
||||
"type": "number"
|
||||
}
|
||||
]
|
||||
|
@ -476,7 +477,7 @@
|
|||
"$comment": "there seems to be a bug here, where ajv gives a warning (not an error) because maxItems doesn't equal the number of entries in items, though this is valid in this case",
|
||||
"items": [
|
||||
{
|
||||
"description": "Bucketing value used in pseudorandom assignment; should be a string that is unique and stable for each subject of flag evaluation. Defaults to a concatenation of the flagKey and targetingKey.",
|
||||
"description": "Bucketing value used in pseudorandom assignment; should be unique and stable for each subject of flag evaluation. Defaults to a concatenation of the flagKey and targetingKey.",
|
||||
"$ref": "#/definitions/anyRule"
|
||||
},
|
||||
{
|
||||
|
|
|
@ -1,163 +1,5 @@
|
|||
# Changelog
|
||||
|
||||
## [0.8.0](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.6...flagd-proxy/v0.8.0) (2025-07-21)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691))
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* remove sync.Type ([#1691](https://github.com/open-feature/flagd/issues/1691)) ([ac647e0](https://github.com/open-feature/flagd/commit/ac647e065636071f5bc065a9a084461cea692166))
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.11.8 ([#1685](https://github.com/open-feature/flagd/issues/1685)) ([c07ffba](https://github.com/open-feature/flagd/commit/c07ffba55426d538224d8564be5f35339d2258d0))
|
||||
|
||||
## [0.7.6](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.5...flagd-proxy/v0.7.6) (2025-07-15)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.11.6 ([#1683](https://github.com/open-feature/flagd/issues/1683)) ([b6da282](https://github.com/open-feature/flagd/commit/b6da282f8a98082ba3733593d501d14842cbd97f))
|
||||
|
||||
## [0.7.5](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.4...flagd-proxy/v0.7.5) (2025-07-10)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **security:** update module github.com/go-viper/mapstructure/v2 to v2.3.0 [security] ([#1667](https://github.com/open-feature/flagd/issues/1667)) ([caa0ed0](https://github.com/open-feature/flagd/commit/caa0ed04eb9d5d01136deb71b8fcc4da72aa1910))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* add sync_context to SyncFlags ([#1642](https://github.com/open-feature/flagd/issues/1642)) ([07a45d9](https://github.com/open-feature/flagd/commit/07a45d9b2275584fa92ff33cbe5e5c7d7864db38))
|
||||
|
||||
## [0.7.4](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.3...flagd-proxy/v0.7.4) (2025-05-28)
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update dependency go to v1.24.1 ([#1559](https://github.com/open-feature/flagd/issues/1559)) ([cd46044](https://github.com/open-feature/flagd/commit/cd4604471bba0a1df67bf87653a38df3caf9d20f))
|
||||
* **security:** upgrade dependency versions ([#1632](https://github.com/open-feature/flagd/issues/1632)) ([761d870](https://github.com/open-feature/flagd/commit/761d870a3c563b8eb1b83ee543b41316c98a1d48))
|
||||
|
||||
## [0.7.3](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.2...flagd-proxy/v0.7.3) (2025-03-25)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.11.2 ([#1570](https://github.com/open-feature/flagd/issues/1570)) ([e151b1f](https://github.com/open-feature/flagd/commit/e151b1f97524a568e361103bf7a388f2598e5861))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.1 ([#1576](https://github.com/open-feature/flagd/issues/1576)) ([cd95193](https://github.com/open-feature/flagd/commit/cd95193f71fd465ffd1b177fa492aa84d8414a87))
|
||||
* **deps:** update module google.golang.org/grpc to v1.71.0 ([#1578](https://github.com/open-feature/flagd/issues/1578)) ([5c2c64f](https://github.com/open-feature/flagd/commit/5c2c64f878b8603dd37cbfd79b0e1588e4b5a3c6))
|
||||
|
||||
## [0.7.2](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.1...flagd-proxy/v0.7.2) (2025-02-21)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.5-20250127221518-be6d1143b690.1 ([#1549](https://github.com/open-feature/flagd/issues/1549)) ([d3eb44e](https://github.com/open-feature/flagd/commit/d3eb44ed45a54bd9152b7477cce17be90016683c))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.11.1 ([#1545](https://github.com/open-feature/flagd/issues/1545)) ([ca663b5](https://github.com/open-feature/flagd/commit/ca663b5832c94834f73cd5449a2f28af631d9556))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.21.0 ([#1568](https://github.com/open-feature/flagd/issues/1568)) ([a3d4162](https://github.com/open-feature/flagd/commit/a3d41625a2b79452c0732af29d0b4f320e74fe8b))
|
||||
* **deps:** update module github.com/spf13/cobra to v1.9.0 ([#1564](https://github.com/open-feature/flagd/issues/1564)) ([345d2a9](https://github.com/open-feature/flagd/commit/345d2a920759e3e7046d7679a9c8a7cdb6cd3b40))
|
||||
* **deps:** update module github.com/spf13/cobra to v1.9.1 ([#1566](https://github.com/open-feature/flagd/issues/1566)) ([a48cc80](https://github.com/open-feature/flagd/commit/a48cc8023963ac0ae41e70d4fd6fb0a9f453dba9))
|
||||
* **deps:** update module golang.org/x/net to v0.35.0 ([#1557](https://github.com/open-feature/flagd/issues/1557)) ([13146e5](https://github.com/open-feature/flagd/commit/13146e5ac3de44e482f496b47dd3e0777d08c718))
|
||||
|
||||
|
||||
### 🧹 Chore
|
||||
|
||||
* **deps:** update golang docker tag to v1.24 ([#1561](https://github.com/open-feature/flagd/issues/1561)) ([130904c](https://github.com/open-feature/flagd/commit/130904c212b1f8d484b96c05dd5996286c2922cd))
|
||||
|
||||
## [0.7.1](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.7.0...flagd-proxy/v0.7.1) (2025-02-04)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.11.0 ([#1541](https://github.com/open-feature/flagd/issues/1541)) ([986a436](https://github.com/open-feature/flagd/commit/986a436e10e9766b897319085cf7dbbe2f10cb24))
|
||||
* **deps:** update module golang.org/x/sync to v0.11.0 ([#1543](https://github.com/open-feature/flagd/issues/1543)) ([7d6c0dc](https://github.com/open-feature/flagd/commit/7d6c0dc6e6e6955af1e5225807deeb2b6797900b))
|
||||
|
||||
## [0.7.0](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.11...flagd-proxy/v0.7.0) (2025-01-31)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540))
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20250127221518-be6d1143b690.2 ([#1536](https://github.com/open-feature/flagd/issues/1536)) ([e23060f](https://github.com/open-feature/flagd/commit/e23060f24b2a714ae748e6b37d0d06b7caa1c95c))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20241220192239-696330adaff0.1 ([#1529](https://github.com/open-feature/flagd/issues/1529)) ([8881a80](https://github.com/open-feature/flagd/commit/8881a804b4055da0127a16b8fc57022d24906e1b))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.4-20250127221518-be6d1143b690.1 ([#1537](https://github.com/open-feature/flagd/issues/1537)) ([f74207b](https://github.com/open-feature/flagd/commit/f74207bc13b75bae4275bc486df51e2da569dd41))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.8 ([#1526](https://github.com/open-feature/flagd/issues/1526)) ([fbf2ed5](https://github.com/open-feature/flagd/commit/fbf2ed527fcf3b300808c7b835a8d891df7b88a8))
|
||||
* **deps:** update module google.golang.org/grpc to v1.70.0 ([#1528](https://github.com/open-feature/flagd/issues/1528)) ([79b2b0a](https://github.com/open-feature/flagd/commit/79b2b0a6bbd48676dcbdd2393feb8247529bf29c))
|
||||
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
* flagSetMetadata in OFREP/ResolveAll, core refactors ([#1540](https://github.com/open-feature/flagd/issues/1540)) ([b49abf9](https://github.com/open-feature/flagd/commit/b49abf95069da93bdf8369c8aa0ae40e698df760))
|
||||
|
||||
## [0.6.11](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.10...flagd-proxy/v0.6.11) (2025-01-19)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.7 ([#1521](https://github.com/open-feature/flagd/issues/1521)) ([bf8e7e0](https://github.com/open-feature/flagd/commit/bf8e7e06d9b34e0548abb8af9cce2becb357f9c4))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1524](https://github.com/open-feature/flagd/issues/1524)) ([eeae9a6](https://github.com/open-feature/flagd/commit/eeae9a64caf93356fd663cc735cc422edcf9e132))
|
||||
* Skip flagd banner when non-console logger in use ([#1516](https://github.com/open-feature/flagd/issues/1516)) ([bae9b6f](https://github.com/open-feature/flagd/commit/bae9b6fb3b53a9d73f4c36e7b676beb6dac03476))
|
||||
|
||||
## [0.6.10](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.9...flagd-proxy/v0.6.10) (2025-01-16)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.3-20241220192239-696330adaff0.1 ([#1513](https://github.com/open-feature/flagd/issues/1513)) ([64c5787](https://github.com/open-feature/flagd/commit/64c57875b032edcef2e2d230e7735990e01b72b8))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.6 ([#1515](https://github.com/open-feature/flagd/issues/1515)) ([586cb62](https://github.com/open-feature/flagd/commit/586cb62e63d66c8f8371236844d506c7bcc8f123))
|
||||
|
||||
## [0.6.9](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.8...flagd-proxy/v0.6.9) (2025-01-15)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.1 ([#1489](https://github.com/open-feature/flagd/issues/1489)) ([53add83](https://github.com/open-feature/flagd/commit/53add83a491c6e00e0d9b1b64a9461e5973edca7))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/grpc/go to v1.5.1-20241220192239-696330adaff0.2 ([#1492](https://github.com/open-feature/flagd/issues/1492)) ([9f1d94a](https://github.com/open-feature/flagd/commit/9f1d94a42ac00ecf5fc58c07a76c350e2e4ec2f6))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20241220192239-696330adaff0.1 ([#1490](https://github.com/open-feature/flagd/issues/1490)) ([6edce72](https://github.com/open-feature/flagd/commit/6edce72e8cff01ea13cbd15d604b35ccc8337f50))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.2-20241220192239-696330adaff0.1 ([#1502](https://github.com/open-feature/flagd/issues/1502)) ([426c36e](https://github.com/open-feature/flagd/commit/426c36e838b9ded3a23f933e66e963c8110c0ddb))
|
||||
* **deps:** update module github.com/mattn/go-colorable to v0.1.14 ([#1508](https://github.com/open-feature/flagd/issues/1508)) ([87727f7](https://github.com/open-feature/flagd/commit/87727f7f8f18e4f532d152190ada5dbe3fc915b0))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.5 ([#1482](https://github.com/open-feature/flagd/issues/1482)) ([ce48cb7](https://github.com/open-feature/flagd/commit/ce48cb757659eef8807531f8522ca1b7bc80422c))
|
||||
* **deps:** update module golang.org/x/net to v0.33.0 [security] ([#1486](https://github.com/open-feature/flagd/issues/1486)) ([4764077](https://github.com/open-feature/flagd/commit/476407769f47675f649c328e27e0f87860f0f79d))
|
||||
* **deps:** update module golang.org/x/net to v0.34.0 ([#1498](https://github.com/open-feature/flagd/issues/1498)) ([7584f95](https://github.com/open-feature/flagd/commit/7584f95e4da50ae870014589a971b83b10c23873))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.2 ([#1484](https://github.com/open-feature/flagd/issues/1484)) ([6b40ad3](https://github.com/open-feature/flagd/commit/6b40ad34c83da4a3116e7cad4139a63a6c918097))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.4 ([#1510](https://github.com/open-feature/flagd/issues/1510)) ([76d6353](https://github.com/open-feature/flagd/commit/76d6353840ab8e7c93bdb0802eb1c49fc6fe1dc0))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1470](https://github.com/open-feature/flagd/issues/1470)) ([26b0b1a](https://github.com/open-feature/flagd/commit/26b0b1af8bc4b3a393c3453784b50f167f13f743))
|
||||
|
||||
## [0.6.8](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.7...flagd-proxy/v0.6.8) (2024-12-17)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.2-20240906125204-0a6a901b42e8.1 ([#1451](https://github.com/open-feature/flagd/issues/1451)) ([8c6d91d](https://github.com/open-feature/flagd/commit/8c6d91d538d226b10cb954c23409902e9d245cda))
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.36.0-20240906125204-0a6a901b42e8.1 ([#1475](https://github.com/open-feature/flagd/issues/1475)) ([0b11c6c](https://github.com/open-feature/flagd/commit/0b11c6cf612b244bda6bab119814647f3ce8de2e))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.4 ([#1433](https://github.com/open-feature/flagd/issues/1433)) ([d33c7a5](https://github.com/open-feature/flagd/commit/d33c7a5522d0909448c6d9d80b0a33d8511f0738))
|
||||
* **deps:** update module golang.org/x/net to v0.31.0 ([#1446](https://github.com/open-feature/flagd/issues/1446)) ([9e35111](https://github.com/open-feature/flagd/commit/9e351117b4b2ebbb4a016d6b189077ae65a83124))
|
||||
* **deps:** update module golang.org/x/net to v0.32.0 ([#1458](https://github.com/open-feature/flagd/issues/1458)) ([ac0b123](https://github.com/open-feature/flagd/commit/ac0b123ce84a0772f144ae0ae8f3153992635ea4))
|
||||
* **deps:** update module golang.org/x/sync to v0.9.0 ([#1445](https://github.com/open-feature/flagd/issues/1445)) ([8893e94](https://github.com/open-feature/flagd/commit/8893e94b94ae79f80a0aa0f25cca5caf874e9d2e))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.0 ([#1442](https://github.com/open-feature/flagd/issues/1442)) ([cd27d09](https://github.com/open-feature/flagd/commit/cd27d098e6d8d8b0f681ef42d26dba1ebac67d12))
|
||||
* **deps:** update module google.golang.org/grpc to v1.68.1 ([#1456](https://github.com/open-feature/flagd/issues/1456)) ([0b6e2a1](https://github.com/open-feature/flagd/commit/0b6e2a1cd64910226d348c921b08a6de8013ac90))
|
||||
* **deps:** update module google.golang.org/grpc to v1.69.0 ([#1469](https://github.com/open-feature/flagd/issues/1469)) ([dd4869f](https://github.com/open-feature/flagd/commit/dd4869f5e095066f80c9d82d1be83155e7504d88))
|
||||
* **deps:** update opentelemetry-go monorepo ([#1447](https://github.com/open-feature/flagd/issues/1447)) ([68b5794](https://github.com/open-feature/flagd/commit/68b5794180da84af9adc1f2cd80f929489969c1c))
|
||||
|
||||
## [0.6.7](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.6...flagd-proxy/v0.6.7) (2024-10-28)
|
||||
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
* **deps:** update module buf.build/gen/go/open-feature/flagd/protocolbuffers/go to v1.35.1-20240906125204-0a6a901b42e8.1 ([#1420](https://github.com/open-feature/flagd/issues/1420)) ([1f06d5a](https://github.com/open-feature/flagd/commit/1f06d5a1837ea2b753974e96c2a1154d6cb3e582))
|
||||
* **deps:** update module github.com/open-feature/flagd/core to v0.10.3 ([#1411](https://github.com/open-feature/flagd/issues/1411)) ([a312196](https://github.com/open-feature/flagd/commit/a312196c118705d7a8eb0056fdb98480b887f7c5))
|
||||
* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([#1425](https://github.com/open-feature/flagd/issues/1425)) ([583ba89](https://github.com/open-feature/flagd/commit/583ba894f2de794b36b6a1cc3bfceb9c46dc9d96))
|
||||
* **deps:** update module golang.org/x/net to v0.30.0 ([#1417](https://github.com/open-feature/flagd/issues/1417)) ([4d5b75e](https://github.com/open-feature/flagd/commit/4d5b75eed9097c09760fcc71bfdf473cd19232ec))
|
||||
* **deps:** update module google.golang.org/grpc to v1.67.1 ([#1415](https://github.com/open-feature/flagd/issues/1415)) ([85a3a6b](https://github.com/open-feature/flagd/commit/85a3a6b46233fcc7cf71a0292b46c82ac8e66d7b))
|
||||
|
||||
## [0.6.6](https://github.com/open-feature/flagd/compare/flagd-proxy/v0.6.5...flagd-proxy/v0.6.6) (2024-09-23)
|
||||
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue